diff --git a/404.html b/404.html
index 425c9abbeda..185206cd1dc 100644
--- a/404.html
+++ b/404.html
@@ -7,7 +7,7 @@
Page Not Found | Apache Linkis
-
+
@@ -15,7 +15,7 @@
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/Images/EngineUsage/python-configure.png b/Images/EngineUsage/python-configure.png new file mode 100644 index 00000000000..5a92d168c39 Binary files /dev/null and b/Images/EngineUsage/python-configure.png differ diff --git a/assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png b/assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png new file mode 100644 index 00000000000..5a92d168c39 Binary files /dev/null and b/assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png differ diff --git a/assets/js/08bd5166.704c48e5.js b/assets/js/08bd5166.8f04ad0b.js similarity index 51% rename from assets/js/08bd5166.704c48e5.js rename to assets/js/08bd5166.8f04ad0b.js index 1b4c7f64801..788ae559605 100644 --- a/assets/js/08bd5166.704c48e5.js +++ b/assets/js/08bd5166.8f04ad0b.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),i=a(72389),c=a(44996),r=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink 
\u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) 
to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which make the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transcation Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,i.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==r?void 0:r[e];return n.createElement("div",null,n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," ",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton 
blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title 
text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,c.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),i=a(72389),c=a(44996),r=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis 
\u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core 
Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug Fixes","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enabling cross engine context sharing, unified job & engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kinds of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which makes the upper layer insensitive when the bottom layers change","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transaction Orchestrator Service","reusability":"Greatly reduces the back-end development workload of upper-level applications; swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,i.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==r?void 0:r[e];return n.createElement("div",null,n.createElement("script",{src:"//cdn.matomo.cloud/apachelinkis.matomo.cloud/matomo.js"}),n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," 
",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item 
after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,c.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}}}]); \ No newline at end of file diff --git a/assets/js/25b6cbf3.58d3e98f.js b/assets/js/25b6cbf3.012976b7.js similarity index 77% rename from assets/js/25b6cbf3.58d3e98f.js rename to assets/js/25b6cbf3.012976b7.js index 7629ee71776..bc5175a9391 100644 --- a/assets/js/25b6cbf3.58d3e98f.js +++ b/assets/js/25b6cbf3.012976b7.js @@ -1 +1 @@ -"use 
strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[90239],{3905:function(e,n,t){t.d(n,{Zo:function(){return u},kt:function(){return k}});var a=t(67294);function i(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function r(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function o(e){for(var n=1;n=0||(i[t]=e[t]);return i}(e,n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(i[t]=e[t])}return i}var l=a.createContext({}),p=function(e){var n=a.useContext(l),t=n;return e&&(t="function"==typeof e?e(n):o(o({},n),e)),t},u=function(e){var n=p(e.components);return a.createElement(l.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},d=a.forwardRef((function(e,n){var t=e.components,i=e.mdxType,r=e.originalType,l=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),d=p(t),k=i,g=d["".concat(l,".").concat(k)]||d[k]||c[k]||r;return t?a.createElement(g,o(o({ref:n},u),{},{components:t})):a.createElement(g,o({ref:n},u))}));function k(e,n){var t=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var r=t.length,o=new Array(r);o[0]=d;var s={};for(var l in n)hasOwnProperty.call(n,l)&&(s[l]=n[l]);s.originalType=e,s.mdxType="string"==typeof e?e:i,o[1]=s;for(var p=2;p"," tag to 2.1.0, and then compile this module separately."),(0,r.kt)("h3",{id:"22-spark-engineconn-deployment-and-loading"},"2.2 spark engineConn deployment and loading"),(0,r.kt)("p",null,"If you have already compiled your spark EngineConn plug-in has been compiled, then you need to put the new plug-in to the specified location to load, you can refer to the following article for details"),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin Installation")," "),(0,r.kt)("h3",{id:"23-tags-of-spark-engineconn"},"2.3 tags of spark EngineConn"),(0,r.kt)("p",null,"Linkis1.0 is done through tags, so we need to insert data in our database, the way of inserting is shown below."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)")," "),(0,r.kt)("h2",{id:"3-use-of-spark-engineconn"},"3. Use of spark EngineConn"),(0,r.kt)("h3",{id:"preparation-for-operation-queue-setting"},"Preparation for operation, queue setting"),(0,r.kt)("p",null,"Because the execution of spark is a resource that requires a queue, the user must set up a queue that he can execute before executing."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(67117).Z})),(0,r.kt)("p",null,"Figure 3-1 Queue settings"),(0,r.kt)("p",null,"You can also add the queue value in the StartUpMap of the submission parameter: ",(0,r.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,r.kt)("h3",{id:"31-how-to-use-linkis-sdk"},"3.1 How to use Linkis SDK"),(0,r.kt)("p",null,"Linkis provides a client method to call Spark tasks. The call method is through the SDK provided by LinkisClient. 
We provide java and scala two ways to call, the specific usage can refer to ",(0,r.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/sdk_manual"},"JAVA SDK Manual"),".\nIf you use Hive, you only need to make the following changes:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType\n')),(0,r.kt)("h3",{id:"32-how-to-use-linkis-cli"},"3.2 How to use Linkis-cli"),(0,r.kt)("p",null,"After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Spark is as follows:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,r.kt)("p",null,"The specific usage can refer to ",(0,r.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,r.kt)("h3",{id:"33-how-to-use-scriptis"},"3.3 How to use Scriptis"),(0,r.kt)("p",null,"The use of ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/WeBankFinTech/Scriptis"},"Scriptis")," is the simplest. You can directly enter Scriptis and create a new sql, scala or pyspark script for execution."),(0,r.kt)("p",null,"The sql method is the simplest. You can create a new sql script and write and execute it. When it is executed, the progress will be displayed. If the user does not have a spark EngineConn at the beginning, the execution of sql will start a spark session (it may take some time here),\nAfter the SparkSession is initialized, you can start to execute sql."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(82196).Z})),(0,r.kt)("p",null,"Figure 3-2 Screenshot of the execution effect of sparksql"),(0,r.kt)("p",null,"For spark-scala tasks, we have initialized sqlContext and other variables, and users can directly use this sqlContext to execute sql."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(91298).Z})),(0,r.kt)("p",null,"Figure 3-3 Execution effect diagram of spark-scala"),(0,r.kt)("p",null,"Similarly, in the way of pyspark, we have also initialized the SparkSession, and users can directly use spark.sql to execute SQL."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(47829).Z}),"\nFigure 3-4 pyspark execution mode"),(0,r.kt)("h2",{id:"4-spark-engineconn-user-settings"},"4. Spark EngineConn user settings"),(0,r.kt)("p",null,"In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. 
These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(63593).Z})),(0,r.kt)("p",null,"Figure 4-1 Spark user-defined configuration management console"))}d.isMDXComponent=!0},47829:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-6e37a518afe1e104834abe4adecbbbf2.png"},67117:function(e,n,t){n.Z=t.p+"assets/images/queue-set-7c9c3c2db8e77ce7f948909adfd80398.png"},91298:function(e,n,t){n.Z=t.p+"assets/images/scala-run-a2982b184c1e726e9746ac040910d775.png"},63593:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-33d44a90c1e7ca6c23450b18555807aa.png"},82196:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-9ec9870b9a630c04ce166ffc6246aa17.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[90239],{3905:function(e,n,t){t.d(n,{Zo:function(){return u},kt:function(){return k}});var a=t(67294);function i(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function r(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function o(e){for(var n=1;n=0||(i[t]=e[t]);return i}(e,n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(i[t]=e[t])}return i}var l=a.createContext({}),p=function(e){var n=a.useContext(l),t=n;return e&&(t="function"==typeof e?e(n):o(o({},n),e)),t},u=function(e){var n=p(e.components);return a.createElement(l.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},d=a.forwardRef((function(e,n){var t=e.components,i=e.mdxType,r=e.originalType,l=e.parentName,u=s(e,["components","mdxType","originalType","parentName"]),d=p(t),k=i,g=d["".concat(l,".").concat(k)]||d[k]||c[k]||r;return t?a.createElement(g,o(o({ref:n},u),{},{components:t})):a.createElement(g,o({ref:n},u))}));function k(e,n){var t=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var r=t.length,o=new Array(r);o[0]=d;var s={};for(var l in n)hasOwnProperty.call(n,l)&&(s[l]=n[l]);s.originalType=e,s.mdxType="string"==typeof e?e:i,o[1]=s;for(var p=2;p"," tag to 2.1.0, and then compile this module separately."),(0,r.kt)("h3",{id:"22-spark-engineconn-deployment-and-loading"},"2.2 spark engineConn deployment and loading"),(0,r.kt)("p",null,"If you have already compiled your spark EngineConn plug-in has been compiled, then you need to put the new plug-in to the specified location to load, you can refer to the following article for details"),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin Installation")," "),(0,r.kt)("h3",{id:"23-tags-of-spark-engineconn"},"2.3 tags of spark EngineConn"),(0,r.kt)("p",null,"Linkis1.0 is done through tags, so we need to insert data in our database, the way of inserting is shown below."),(0,r.kt)("p",null,(0,r.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)")," "),(0,r.kt)("h2",{id:"3-use-of-spark-engineconn"},"3. 
Use of spark EngineConn"),(0,r.kt)("h3",{id:"preparation-for-operation-queue-setting"},"Preparation for operation, queue setting"),(0,r.kt)("p",null,"Because the execution of spark is a resource that requires a queue, the user must set up a queue that he can execute before executing."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(67117).Z})),(0,r.kt)("p",null,"Figure 3-1 Queue settings"),(0,r.kt)("p",null,"You can also add the queue value in the StartUpMap of the submission parameter: ",(0,r.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,r.kt)("h3",{id:"31-how-to-use-linkis-sdk"},"3.1 How to use Linkis SDK"),(0,r.kt)("p",null,"Linkis provides a client method to call Spark tasks. The call method is through the SDK provided by LinkisClient. We provide java and scala two ways to call, the specific usage can refer to ",(0,r.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/sdk_manual"},"JAVA SDK Manual"),".\nIf you use Hive, you only need to make the following changes:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType\n')),(0,r.kt)("h3",{id:"32-how-to-use-linkis-cli"},"3.2 How to use Linkis-cli"),(0,r.kt)("p",null,"After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Spark is as follows:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType correspondence py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# You can specify the yarn queue in the submission parameter by -confMap wds.linkis.yarnqueue=dws\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,r.kt)("p",null,"The specific usage can refer to ",(0,r.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,r.kt)("h3",{id:"33-how-to-use-scriptis"},"3.3 How to use Scriptis"),(0,r.kt)("p",null,"The use of ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/WeBankFinTech/Scriptis"},"Scriptis")," is the simplest. You can directly enter Scriptis and create a new sql, scala or pyspark script for execution."),(0,r.kt)("p",null,"The sql method is the simplest. You can create a new sql script and write and execute it. When it is executed, the progress will be displayed. 
If the user does not have a spark EngineConn at the beginning, the execution of sql will start a spark session (it may take some time here),\nAfter the SparkSession is initialized, you can start to execute sql."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(82196).Z})),(0,r.kt)("p",null,"Figure 3-2 Screenshot of the execution effect of sparksql"),(0,r.kt)("p",null,"For spark-scala tasks, we have initialized sqlContext and other variables, and users can directly use this sqlContext to execute sql."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(91298).Z})),(0,r.kt)("p",null,"Figure 3-3 Execution effect diagram of spark-scala"),(0,r.kt)("p",null,"Similarly, in the way of pyspark, we have also initialized the SparkSession, and users can directly use spark.sql to execute SQL."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(47829).Z}),"\nFigure 3-4 pyspark execution mode"),(0,r.kt)("h2",{id:"4-spark-engineconn-user-settings"},"4. Spark EngineConn user settings"),(0,r.kt)("p",null,"In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(63593).Z})),(0,r.kt)("p",null,"Figure 4-1 Spark user-defined configuration management console"))}d.isMDXComponent=!0},47829:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-6e37a518afe1e104834abe4adecbbbf2.png"},67117:function(e,n,t){n.Z=t.p+"assets/images/queue-set-7c9c3c2db8e77ce7f948909adfd80398.png"},91298:function(e,n,t){n.Z=t.p+"assets/images/scala-run-a2982b184c1e726e9746ac040910d775.png"},63593:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-33d44a90c1e7ca6c23450b18555807aa.png"},82196:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-9ec9870b9a630c04ce166ffc6246aa17.png"}}]); \ No newline at end of file diff --git a/assets/js/5ca5940e.13035838.js b/assets/js/5ca5940e.43c8f57b.js similarity index 98% rename from assets/js/5ca5940e.13035838.js rename to assets/js/5ca5940e.43c8f57b.js index 8cae3ab7120..7a298678b49 100644 --- a/assets/js/5ca5940e.13035838.js +++ b/assets/js/5ca5940e.43c8f57b.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[52066],{3905:function(e,n,i){i.d(n,{Zo:function(){return c},kt:function(){return g}});var t=i(67294);function l(e,n,i){return n in e?Object.defineProperty(e,n,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[n]=i,e}function a(e,n){var i=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);n&&(t=t.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),i.push.apply(i,t)}return i}function r(e){for(var n=1;n=0||(l[i]=e[i]);return l}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(t=0;t=0||Object.prototype.propertyIsEnumerable.call(e,i)&&(l[i]=e[i])}return l}var p=t.createContext({}),s=function(e){var n=t.useContext(p),i=n;return e&&(i="function"==typeof e?e(n):r(r({},n),e)),i},c=function(e){var n=s(e.components);return t.createElement(p.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return t.createElement(t.Fragment,{},n)}},d=t.forwardRef((function(e,n){var 
i=e.components,l=e.mdxType,a=e.originalType,p=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),d=s(i),g=l,m=d["".concat(p,".").concat(g)]||d[g]||u[g]||a;return i?t.createElement(m,r(r({ref:n},c),{},{components:i})):t.createElement(m,r({ref:n},c))}));function g(e,n){var i=arguments,l=n&&n.mdxType;if("string"==typeof e||l){var a=i.length,r=new Array(a);r[0]=d;var o={};for(var p in n)hasOwnProperty.call(n,p)&&(o[p]=n[p]);o.originalType=e,o.mdxType="string"==typeof e?e:l,r[1]=o;for(var s=2;s=1.1.0 version support) engine.",source:"@site/docs/engine_usage/pipeline.md",sourceDirName:"engine_usage",slug:"/engine_usage/pipeline",permalink:"/docs/1.1.3/engine_usage/pipeline",editUrl:"https://github.com/apache/incubator-linkis-website/edit/dev/docs/engine_usage/pipeline.md",tags:[],version:"current",sidebarPosition:10,frontMatter:{title:"pipeline engine",sidebar_position:10},sidebar:"tutorialSidebar",previous:{title:"Sqoop Engine",permalink:"/docs/1.1.3/engine_usage/sqoop"},next:{title:"Overview",permalink:"/docs/1.1.3/api/overview"}},c=[{value:"1 Configuration and deployment",id:"1-configuration-and-deployment",children:[{value:"1.1 Version selection and compilation",id:"11-version-selection-and-compilation",children:[]},{value:"1.2 Material deployment and loading",id:"12-material-deployment-and-loading",children:[]},{value:"1.3 Engine label",id:"13-engine-label",children:[]}]},{value:"2 Use of engine",id:"2-use-of-engine",children:[{value:"2.1 Task submission via linkis cli",id:"21-task-submission-via-linkis-cli",children:[]},{value:"2.2 New script",id:"22-new-script",children:[]},{value:"2.3 Script",id:"23-script",children:[]},{value:"2.4 result",id:"24-result",children:[]}]}],u={toc:c};function d(e){var n=e.components,o=(0,l.Z)(e,r);return(0,a.kt)("wrapper",(0,t.Z)({},u,o,{components:n,mdxType:"MDXLayout"}),(0,a.kt)("p",null,"This article mainly introduces the configuration, deployment and use of pipeline (>=1.1.0 version support) engine."),(0,a.kt)("h2",{id:"1-configuration-and-deployment"},"1 Configuration and deployment"),(0,a.kt)("h3",{id:"11-version-selection-and-compilation"},"1.1 Version selection and compilation"),(0,a.kt)("p",null,"Note: before compiling the ",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline"),"engine, you need to compile the linkis project in full\nCurrently, the ",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline")," engine needs to be installed and deployed by itself"),(0,a.kt)("p",null,"This engine plug-in is not included in the published installation and deployment package by default,\nYou can follow this guide to deploy the installation ",(0,a.kt)("a",{parentName:"p",href:"https://linkis.apache.org/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin"},"https://linkis.apache.org/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin"),"\nOr manually compile the deployment according to the following process"),(0,a.kt)("p",null,"Compile separately",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline")," "),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"${linkis_code_dir}/linkis-enginepconn-pugins/engineconn-plugins/pipeline/\nmvn clean install\n")),(0,a.kt)("h3",{id:"12-material-deployment-and-loading"},"1.2 Material deployment and loading"),(0,a.kt)("p",null,"\u5c06 1.1 The engine package compiled in step, located in"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/pipeline/target/out/pipeline\n")),(0,a.kt)("p",null,"Upload to the 
engine directory of the server"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"${LINKIS_HOME}/lib/linkis-engineplugins\n")),(0,a.kt)("p",null,"And restart the ",(0,a.kt)("inlineCode",{parentName:"p"},"linkis engineplugin")," to refresh the engine"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"cd ${LINKIS_HOME}/sbin\nsh linkis-daemon.sh restart cg-engineplugin\n")),(0,a.kt)("p",null,"Or refresh through the engine interface. After the engine is placed in the corresponding directory, send a refresh request to the ",(0,a.kt)("inlineCode",{parentName:"p"},"linkis CG engineconplugin service")," through the HTTP interface."),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"Interface",(0,a.kt)("inlineCode",{parentName:"p"},"http://${engineconn-plugin-server-IP}:${port}/api/rest_j/v1/rpc/receiveAndReply"))),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"Request mode ",(0,a.kt)("inlineCode",{parentName:"p"},"POST")))),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-json"},'{\n "method": "/enginePlugin/engineConn/refreshAll"\n}\n')),(0,a.kt)("p",null,"Check whether the engine is refreshed successfully: if you encounter problems during the refresh process and need to confirm whether the refresh is successful, you can view the",(0,a.kt)("inlineCode",{parentName:"p"},"linkis_engine_conn_plugin_bml_resources"),"Of this table",(0,a.kt)("inlineCode",{parentName:"p"},"last_update_time"),"Whether it is the time when the refresh is triggered."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-sql"},"#Log in to the database of linkis\nselect * from linkis_cg_engine_conn_plugin_bml_resources\n")),(0,a.kt)("h3",{id:"13-engine-label"},"1.3 Engine label"),(0,a.kt)("p",null,"Linkis1.XIt is carried out through labels, so it is necessary to insert data into our database. The insertion method is shown below."),(0,a.kt)("p",null,(0,a.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin Engine plug-in installation")," "),(0,a.kt)("h2",{id:"2-use-of-engine"},"2 Use of engine"),(0,a.kt)("h3",{id:"21-task-submission-via-linkis-cli"},"2.1 Task submission via linkis cli"),(0,a.kt)("p",null,"Link 1.0 provides cli to submit tasks. We only need to specify the corresponding enginecon and codetype tag types. The use of pipeline is as follows:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"Note that the enginetype pipeline-1 engine version setting is prefixed. If the pipeline version is V1 , it is set to pipeline-1 ")),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-shell"},'sh bin/linkis-cli -submitUser hadoop -engineType pipeline-1 -codeType pipeline -code "from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv"\n')),(0,a.kt)("p",null,"from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv 3.3 Explained"),(0,a.kt)("p",null,"For specific use, please refer to\uff1a ",(0,a.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,a.kt)("p",null,"because",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline"),"The engine is mainly used to import and export files. 
Now let's assume that importing files from a to B is the most introduced case"),(0,a.kt)("h3",{id:"22-new-script"},"2.2 New script"),(0,a.kt)("p",null,"Right click the workspace module and select Create a new workspace of type",(0,a.kt)("inlineCode",{parentName:"p"},"storage"),"Script for"),(0,a.kt)("p",null,(0,a.kt)("img",{src:i(20903).Z})),(0,a.kt)("h3",{id:"23-script"},"2.3 Script"),(0,a.kt)("h5",{id:"syntax-isfrom-path-to-path"},"Syntax is\uff1afrom path to path"),(0,a.kt)("p",null,"The syntax is file copy rule:",(0,a.kt)("inlineCode",{parentName:"p"},"dolphin"),"Suffix type files are result set files that can be converted to",(0,a.kt)("inlineCode",{parentName:"p"},".csv"),"Type and",(0,a.kt)("inlineCode",{parentName:"p"},".xlsx"),"Type file, other types can only be copied from address a to address B, referred to as handling"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"#dolphin type\nfrom hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv\nfrom hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.xlsx\n\n#Other types\nfrom hdfs:///000/000/000/A.txt to file:///000/000/000/B.txt\n")),(0,a.kt)("p",null,"A file importing script to B folder"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"from hdfs:///000/000/000/A.csv to file:///000/000/B/\n")),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"from")," grammar\uff0c",(0,a.kt)("inlineCode",{parentName:"li"},"to"),"\uff1agrammar"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"hdfs:///000/000/000/A.csv"),"\uff1aOutput file path and file"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"file:///000/000/B/"),"\uff1a Input file path and file")),(0,a.kt)("p",null,"B file import script to a folder"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"from hdfs:///000/000/000/B.csv to file:///000/000/000/A.CSV\n")),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"hdfs:///000/000/000/B.csv"),"\uff1a Output file path and file"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"file:///000/000/A/"),"\uff1a Input file path and file")),(0,a.kt)("p",null,(0,a.kt)("img",{src:i(32792).Z})),(0,a.kt)("p",null,"Note: no semicolon is allowed at the end of the syntax; Otherwise, the syntax is incorrect."),(0,a.kt)("h3",{id:"24-result"},"2.4 result"),(0,a.kt)("p",null,"speed of progress"),(0,a.kt)("p",null,(0,a.kt)("img",{src:i(77242).Z})),(0,a.kt)("p",null,"historical information\n",(0,a.kt)("img",{src:i(31937).Z})))}d.isMDXComponent=!0},31937:function(e,n,i){n.Z=i.p+"assets/images/historical_information-d99bbfb4230732cea0dbb96a34ac990a.png"},77242:function(e,n,i){n.Z=i.p+"assets/images/job_state-fb7240b087736c48def704b2a683b873.png"},20903:function(e,n,i){n.Z=i.p+"assets/images/new_pipeline_script-3a37e4c0883855702a289b87ded7cd90.png"},32792:function(e,n,i){n.Z=i.p+"assets/images/to_write-6b49f070a804d94e1882f6d11c41508c.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[52066],{3905:function(e,n,i){i.d(n,{Zo:function(){return c},kt:function(){return g}});var t=i(67294);function l(e,n,i){return n in e?Object.defineProperty(e,n,{value:i,enumerable:!0,configurable:!0,writable:!0}):e[n]=i,e}function a(e,n){var i=Object.keys(e);if(Object.getOwnPropertySymbols){var 
t=Object.getOwnPropertySymbols(e);n&&(t=t.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),i.push.apply(i,t)}return i}function r(e){for(var n=1;n=0||(l[i]=e[i]);return l}(e,n);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(t=0;t=0||Object.prototype.propertyIsEnumerable.call(e,i)&&(l[i]=e[i])}return l}var p=t.createContext({}),s=function(e){var n=t.useContext(p),i=n;return e&&(i="function"==typeof e?e(n):r(r({},n),e)),i},c=function(e){var n=s(e.components);return t.createElement(p.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return t.createElement(t.Fragment,{},n)}},d=t.forwardRef((function(e,n){var i=e.components,l=e.mdxType,a=e.originalType,p=e.parentName,c=o(e,["components","mdxType","originalType","parentName"]),d=s(i),g=l,m=d["".concat(p,".").concat(g)]||d[g]||u[g]||a;return i?t.createElement(m,r(r({ref:n},c),{},{components:i})):t.createElement(m,r({ref:n},c))}));function g(e,n){var i=arguments,l=n&&n.mdxType;if("string"==typeof e||l){var a=i.length,r=new Array(a);r[0]=d;var o={};for(var p in n)hasOwnProperty.call(n,p)&&(o[p]=n[p]);o.originalType=e,o.mdxType="string"==typeof e?e:l,r[1]=o;for(var s=2;s=1.1.0 version support) engine.",source:"@site/docs/engine_usage/pipeline.md",sourceDirName:"engine_usage",slug:"/engine_usage/pipeline",permalink:"/docs/1.1.3/engine_usage/pipeline",editUrl:"https://github.com/apache/incubator-linkis-website/edit/dev/docs/engine_usage/pipeline.md",tags:[],version:"current",sidebarPosition:10,frontMatter:{title:"Pipeline Engine",sidebar_position:10},sidebar:"tutorialSidebar",previous:{title:"Sqoop Engine",permalink:"/docs/1.1.3/engine_usage/sqoop"},next:{title:"Overview",permalink:"/docs/1.1.3/api/overview"}},c=[{value:"1 Configuration and deployment",id:"1-configuration-and-deployment",children:[{value:"1.1 Version selection and compilation",id:"11-version-selection-and-compilation",children:[]},{value:"1.2 Material deployment and loading",id:"12-material-deployment-and-loading",children:[]},{value:"1.3 Engine label",id:"13-engine-label",children:[]}]},{value:"2 Use of engine",id:"2-use-of-engine",children:[{value:"2.1 Task submission via linkis cli",id:"21-task-submission-via-linkis-cli",children:[]},{value:"2.2 New script",id:"22-new-script",children:[]},{value:"2.3 Script",id:"23-script",children:[]},{value:"2.4 result",id:"24-result",children:[]}]}],u={toc:c};function d(e){var n=e.components,o=(0,l.Z)(e,r);return(0,a.kt)("wrapper",(0,t.Z)({},u,o,{components:n,mdxType:"MDXLayout"}),(0,a.kt)("p",null,"This article mainly introduces the configuration, deployment and use of pipeline (>=1.1.0 version support) engine."),(0,a.kt)("h2",{id:"1-configuration-and-deployment"},"1 Configuration and deployment"),(0,a.kt)("h3",{id:"11-version-selection-and-compilation"},"1.1 Version selection and compilation"),(0,a.kt)("p",null,"Note: before compiling the ",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline"),"engine, you need to compile the linkis project in full\nCurrently, the ",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline")," engine needs to be installed and deployed by itself"),(0,a.kt)("p",null,"This engine plug-in is not included in the published installation and deployment package by default,\nYou can follow this guide to deploy the installation ",(0,a.kt)("a",{parentName:"p",href:"https://linkis.apache.org/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin"},"https://linkis.apache.org/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin"),"\nOr 
manually compile and deploy it according to the following process."),(0,a.kt)("p",null,"Compile the ",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline")," engine separately:"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre"},"${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/pipeline/\nmvn clean install\n")),(0,a.kt)("h3",{id:"12-material-deployment-and-loading"},"1.2 Material deployment and loading"),(0,a.kt)("p",null,"The engine package compiled in step 1.1 is located at"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/pipeline/target/out/pipeline\n")),(0,a.kt)("p",null,"Upload it to the engine directory of the server"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"${LINKIS_HOME}/lib/linkis-engineplugins\n")),(0,a.kt)("p",null,"Then restart the ",(0,a.kt)("inlineCode",{parentName:"p"},"linkis-engineplugin")," service to refresh the engine"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"cd ${LINKIS_HOME}/sbin\nsh linkis-daemon.sh restart cg-engineplugin\n")),(0,a.kt)("p",null,"Alternatively, refresh through the engine interface: after the engine package is placed in the corresponding directory, send a refresh request to the ",(0,a.kt)("inlineCode",{parentName:"p"},"linkis-cg-engineplugin")," service through the HTTP interface."),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"Interface: ",(0,a.kt)("inlineCode",{parentName:"p"},"http://${engineconn-plugin-server-IP}:${port}/api/rest_j/v1/rpc/receiveAndReply"))),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("p",{parentName:"li"},"Request method: ",(0,a.kt)("inlineCode",{parentName:"p"},"POST")))),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-json"},'{\n "method": "/enginePlugin/engineConn/refreshAll"\n}\n')),(0,a.kt)("p",null,"To check whether the engine was refreshed successfully: if you run into problems during the refresh and need to confirm whether it succeeded, check whether the ",(0,a.kt)("inlineCode",{parentName:"p"},"last_update_time")," column of the table ",(0,a.kt)("inlineCode",{parentName:"p"},"linkis_cg_engine_conn_plugin_bml_resources")," is the time at which the refresh was triggered."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-sql"},"#Log in to the database of linkis\nselect * from linkis_cg_engine_conn_plugin_bml_resources\n")),(0,a.kt)("h3",{id:"13-engine-label"},"1.3 Engine label"),(0,a.kt)("p",null,"Linkis 1.X manages engines through labels, so the corresponding label data needs to be inserted into the database. The insertion method is shown below."),(0,a.kt)("p",null,(0,a.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin Engine plug-in installation")," "),(0,a.kt)("h2",{id:"2-use-of-engine"},"2 Use of engine"),(0,a.kt)("h3",{id:"21-task-submission-via-linkis-cli"},"2.1 Task submission via linkis cli"),(0,a.kt)("p",null,"Linkis 1.0 provides a cli for submitting tasks. You only need to specify the corresponding engineType and codeType label types. Pipeline is used as follows:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},"Note that in the engineType pipeline-1 the engine version is appended after the engine name: if the pipeline version is v1, it is set to pipeline-1 ")),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-shell"},'sh bin/linkis-cli -submitUser hadoop -engineType pipeline-1 -codeType pipeline -code "from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv"\n')),(0,a.kt)("p",null,"The statement from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv is explained in section 2.3 below."),(0,a.kt)("p",null,"For specific usage, please refer to ",(0,a.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,a.kt)("p",null,"Because the ",(0,a.kt)("inlineCode",{parentName:"p"},"pipeline")," engine is mainly used to import and export files, the following introduction takes importing file A to folder B as its main example."),(0,a.kt)("h3",{id:"22-new-script"},"2.2 New script"),(0,a.kt)("p",null,"Right-click in the workspace module and create a new script of type ",(0,a.kt)("inlineCode",{parentName:"p"},"storage"),"."),(0,a.kt)("p",null,(0,a.kt)("img",{src:i(20903).Z})),(0,a.kt)("h3",{id:"23-script"},"2.3 Script"),(0,a.kt)("h5",{id:"syntax-isfrom-path-to-path"},"Syntax: from path to path"),(0,a.kt)("p",null,"Each statement is a file copy rule: files with the ",(0,a.kt)("inlineCode",{parentName:"p"},"dolphin")," suffix are result-set files that can be converted to ",(0,a.kt)("inlineCode",{parentName:"p"},".csv")," and ",(0,a.kt)("inlineCode",{parentName:"p"},".xlsx")," files; files of other types can only be copied from address A to address B, which is referred to as transfer."),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"#dolphin type\nfrom hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv\nfrom hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.xlsx\n\n#Other types\nfrom hdfs:///000/000/000/A.txt to file:///000/000/000/B.txt\n")),(0,a.kt)("p",null,"Script importing file A to folder B"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"from hdfs:///000/000/000/A.csv to file:///000/000/B/\n")),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"from")," / ",(0,a.kt)("inlineCode",{parentName:"li"},"to"),": keywords of the syntax"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"hdfs:///000/000/000/A.csv"),": source file path"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"file:///000/000/B/"),": destination folder path")),(0,a.kt)("p",null,"Script importing file B to folder A"),(0,a.kt)("pre",null,(0,a.kt)("code",{parentName:"pre",className:"language-bash"},"from hdfs:///000/000/000/B.csv to file:///000/000/000/A.CSV\n")),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"hdfs:///000/000/000/B.csv"),": source file path"),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("inlineCode",{parentName:"li"},"file:///000/000/A/"),": destination folder path")),(0,a.kt)("p",null,(0,a.kt)("img",{src:i(32792).Z})),(0,a.kt)("p",null,"Note: no semicolon is allowed at the end of a statement; otherwise the syntax is invalid."),(0,a.kt)("h3",{id:"24-result"},"2.4 Result"),(0,a.kt)("p",null,"Progress"),(0,a.kt)("p",null,(0,a.kt)("img",{src:i(77242).Z})),(0,a.kt)("p",null,"Historical 
information\n",(0,a.kt)("img",{src:i(31937).Z})))}d.isMDXComponent=!0},31937:function(e,n,i){n.Z=i.p+"assets/images/historical_information-d99bbfb4230732cea0dbb96a34ac990a.png"},77242:function(e,n,i){n.Z=i.p+"assets/images/job_state-fb7240b087736c48def704b2a683b873.png"},20903:function(e,n,i){n.Z=i.p+"assets/images/new_pipeline_script-3a37e4c0883855702a289b87ded7cd90.png"},32792:function(e,n,i){n.Z=i.p+"assets/images/to_write-6b49f070a804d94e1882f6d11c41508c.png"}}]); \ No newline at end of file diff --git a/assets/js/65df3d35.dea6b928.js b/assets/js/65df3d35.0e132c20.js similarity index 98% rename from assets/js/65df3d35.dea6b928.js rename to assets/js/65df3d35.0e132c20.js index ebfbcd5dafd..d80d7fba80b 100644 --- a/assets/js/65df3d35.dea6b928.js +++ b/assets/js/65df3d35.0e132c20.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[34643],{3905:function(e,t,n){n.d(t,{Zo:function(){return u},kt:function(){return f}});var r=n(67294);function i(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function a(e){for(var t=1;t=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var p=r.createContext({}),s=function(e){var t=r.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},u=function(e){var t=s(e.components);return r.createElement(p.Provider,{value:t},e.children)},l={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,p=e.parentName,u=c(e,["components","mdxType","originalType","parentName"]),d=s(n),f=i,m=d["".concat(p,".").concat(f)]||d[f]||l[f]||o;return n?r.createElement(m,a(a({ref:t},u),{},{components:n})):r.createElement(m,a({ref:t},u))}));function f(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,a=new Array(o);a[0]=d;var c={};for(var p in t)hasOwnProperty.call(t,p)&&(c[p]=t[p]);c.originalType=e,c.mdxType="string"==typeof e?e:i,a[1]=c;for(var s=2;s=0||(i[n]=e[n]);return i}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(i[n]=e[n])}return i}var p=r.createContext({}),s=function(e){var t=r.useContext(p),n=t;return e&&(n="function"==typeof e?e(t):a(a({},t),e)),n},u=function(e){var t=s(e.components);return r.createElement(p.Provider,{value:t},e.children)},l={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},d=r.forwardRef((function(e,t){var n=e.components,i=e.mdxType,o=e.originalType,p=e.parentName,u=c(e,["components","mdxType","originalType","parentName"]),d=s(n),f=i,m=d["".concat(p,".").concat(f)]||d[f]||l[f]||o;return n?r.createElement(m,a(a({ref:t},u),{},{components:n})):r.createElement(m,a({ref:t},u))}));function f(e,t){var n=arguments,i=t&&t.mdxType;if("string"==typeof e||i){var o=n.length,a=new Array(o);a[0]=d;var c={};for(var p in t)hasOwnProperty.call(t,p)&&(c[p]=t[p]);c.originalType=e,c.mdxType="string"==typeof e?e:i,a[1]=c;for(var s=2;s=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var 
r=Object.getOwnPropertySymbols(n);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var l=o.createContext({}),c=function(n){var e=o.useContext(l),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},u=function(n){var e=c(n.components);return o.createElement(l.Provider,{value:e},n.children)},p={inlineCode:"code",wrapper:function(n){var e=n.children;return o.createElement(o.Fragment,{},e)}},h=o.forwardRef((function(n,e){var t=n.components,i=n.mdxType,r=n.originalType,l=n.parentName,u=s(n,["components","mdxType","originalType","parentName"]),h=c(t),d=i,y=h["".concat(l,".").concat(d)]||h[d]||p[d]||r;return t?o.createElement(y,a(a({ref:e},u),{},{components:t})):o.createElement(y,a({ref:e},u))}));function d(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var r=t.length,a=new Array(r);a[0]=h;var s={};for(var l in e)hasOwnProperty.call(e,l)&&(s[l]=e[l]);s.originalType=n,s.mdxType="string"==typeof n?n:i,a[1]=s;for(var c=2;c labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType\n')),(0,r.kt)("h3",{id:"32-how-to-use-linkis-cli"},"3.2 How to use Linkis-cli"),(0,r.kt)("p",null,"After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Python is as follows:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,r.kt)("p",null,"The specific usage can refer to ",(0,r.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,r.kt)("h3",{id:"33-how-to-use-scriptis"},"3.3 How to use Scriptis"),(0,r.kt)("p",null,"The way to use ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/WeBankFinTech/Scriptis"},"Scriptis")," is the simplest. You can directly enter Scriptis, right-click the directory and create a new python script, write python code and click Execute."),(0,r.kt)("p",null,"The execution logic of python is to start a python through Py4j\nGateway, and then the Python EngineConn submits the code to the python executor for execution."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(53050).Z})),(0,r.kt)("p",null,"Figure 3-1 Screenshot of the execution effect of python"),(0,r.kt)("h2",{id:"4-python-engineconn-user-settings"},"4. 
Python EngineConn user settings"),(0,r.kt)("p",null,"In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(64463).Z})),(0,r.kt)("p",null,"Figure 4-1 User-defined configuration management console of python"))}h.isMDXComponent=!0},64463:function(n,e,t){e.Z=t.p+"assets/images/python-config-ebbc3887013ead8fe621ad968aaf185c.png"},53050:function(n,e,t){e.Z=t.p+"assets/images/python-run-acaf217e664ca9de98ccd0d3dfc20b86.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[10527],{3905:function(n,e,t){t.d(e,{Zo:function(){return p},kt:function(){return d}});var o=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function r(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);e&&(o=o.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,o)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var l=o.createContext({}),c=function(n){var e=o.useContext(l),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},p=function(n){var e=c(n.components);return o.createElement(l.Provider,{value:e},n.children)},u={inlineCode:"code",wrapper:function(n){var e=n.children;return o.createElement(o.Fragment,{},e)}},h=o.forwardRef((function(n,e){var t=n.components,i=n.mdxType,r=n.originalType,l=n.parentName,p=s(n,["components","mdxType","originalType","parentName"]),h=c(t),d=i,y=h["".concat(l,".").concat(d)]||h[d]||u[d]||r;return t?o.createElement(y,a(a({ref:e},p),{},{components:t})):o.createElement(y,a({ref:e},p))}));function d(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var r=t.length,a=new Array(r);a[0]=h;var s={};for(var l in e)hasOwnProperty.call(e,l)&&(s[l]=e[l]);s.originalType=n,s.mdxType="string"==typeof n?n:i,a[1]=s;for(var c=2;c labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType\n')),(0,r.kt)("h3",{id:"32-how-to-use-linkis-cli"},"3.2 How to use Linkis-cli"),(0,r.kt)("p",null,"After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Python is as follows:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,r.kt)("p",null,"The specific usage can refer to ",(0,r.kt)("a",{parentName:"p",href:"/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,r.kt)("h3",{id:"33-how-to-use-scriptis"},"3.3 How to use Scriptis"),(0,r.kt)("p",null,"The way to use ",(0,r.kt)("a",{parentName:"p",href:"https://github.com/WeBankFinTech/Scriptis"},"Scriptis")," is the simplest. 
You can directly enter Scriptis, right-click the directory and create a new python script, write python code and click Execute."),(0,r.kt)("p",null,"The execution logic of python is to start a python through Py4j\nGateway, and then the Python EngineConn submits the code to the python executor for execution."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(53050).Z})),(0,r.kt)("p",null,"Figure 3-1 Screenshot of the execution effect of python"),(0,r.kt)("h2",{id:"4-python-engineconn-user-settings"},"4. Python EngineConn user settings"),(0,r.kt)("p",null,"In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load."),(0,r.kt)("p",null,(0,r.kt)("img",{src:t(64463).Z})),(0,r.kt)("p",null,"Figure 4-1 User-defined configuration management console of python"))}h.isMDXComponent=!0},64463:function(n,e,t){e.Z=t.p+"assets/images/python-config-ebbc3887013ead8fe621ad968aaf185c.png"},86873:function(n,e,t){e.Z=t.p+"assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png"},53050:function(n,e,t){e.Z=t.p+"assets/images/python-run-acaf217e664ca9de98ccd0d3dfc20b86.png"}}]); \ No newline at end of file diff --git a/assets/js/82c182bc.2d2b65a4.js b/assets/js/82c182bc.4d6fec64.js similarity index 99% rename from assets/js/82c182bc.2d2b65a4.js rename to assets/js/82c182bc.4d6fec64.js index a777997bf55..4c12445fbd5 100644 --- a/assets/js/82c182bc.2d2b65a4.js +++ b/assets/js/82c182bc.4d6fec64.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[46750],{3905:function(e,n,o){o.d(n,{Zo:function(){return c},kt:function(){return u}});var a=o(67294);function t(e,n,o){return n in e?Object.defineProperty(e,n,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[n]=o,e}function r(e,n){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),o.push.apply(o,a)}return o}function i(e){for(var n=1;n=0||(t[o]=e[o]);return t}(e,n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(t[o]=e[o])}return t}var l=a.createContext({}),p=function(e){var n=a.useContext(l),o=n;return e&&(o="function"==typeof e?e(n):i(i({},n),e)),o},c=function(e){var n=p(e.components);return a.createElement(l.Provider,{value:n},e.children)},d={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},g=a.forwardRef((function(e,n){var o=e.components,t=e.mdxType,r=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),g=p(o),u=t,m=g["".concat(l,".").concat(u)]||g[u]||d[u]||r;return o?a.createElement(m,i(i({ref:n},c),{},{components:o})):a.createElement(m,i({ref:n},c))}));function u(e,n){var o=arguments,t=n&&n.mdxType;if("string"==typeof e||t){var r=o.length,i=new Array(r);i[0]=g;var s={};for(var l in n)hasOwnProperty.call(n,l)&&(s[l]=n[l]);s.originalType=e,s.mdxType="string"==typeof e?e:t,i[1]=s;for(var p=2;p\n org.apache.linkis\n linkis-computation-client\n ${linkis.version}\n\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Test Case\uff1a")),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-scala"},'\npackage com.webank.wedatasphere.exchangis.job.server.log.client\n\nimport java.util.concurrent.TimeUnit\n\nimport java.util\n\nimport 
org.apache.linkis.computation.client.LinkisJobBuilder\nimport org.apache.linkis.computation.client.once.simple.{SimpleOnceJob, SimpleOnceJobBuilder, SubmittableSimpleOnceJob}\nimport org.apache.linkis.computation.client.operator.impl.{EngineConnLogOperator, EngineConnMetricsOperator, EngineConnProgressOperator}\nimport org.apache.linkis.computation.client.utils.LabelKeyUtils\n\nimport scala.collection.JavaConverters._\n\nobject SqoopOnceJobTest extends App {\n LinkisJobBuilder.setDefaultServerUrl("http://127.0.0.1:9001")\n val logPath = "C:\\\\Users\\\\resources\\\\log4j.properties"\n System.setProperty("log4j.configurationFile", logPath)\n val startUpMap = new util.HashMap[String, Any]\n startUpMap.put("wds.linkis.engineconn.java.driver.memory", "1g")\n val builder = SimpleOnceJob.builder().setCreateService("Linkis-Client")\n .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "sqoop-1.4.6")\n .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "Client")\n .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")\n .setStartupParams(startUpMap)\n .setMaxSubmitTime(30000)\n .addExecuteUser("freeuser")\n val onceJob = importJob(builder)\n val time = System.currentTimeMillis()\n onceJob.submit()\n println(onceJob.getId)\n val logOperator = onceJob.getOperator(EngineConnLogOperator.OPERATOR_NAME).asInstanceOf[EngineConnLogOperator]\n println(onceJob.getECMServiceInstance)\n logOperator.setFromLine(0)\n logOperator.setECMServiceInstance(onceJob.getECMServiceInstance)\n logOperator.setEngineConnType("sqoop")\n logOperator.setIgnoreKeywords("[main],[SpringContextShutdownHook]")\n var progressOperator = onceJob.getOperator(EngineConnProgressOperator.OPERATOR_NAME).asInstanceOf[EngineConnProgressOperator]\n var metricOperator = onceJob.getOperator(EngineConnMetricsOperator.OPERATOR_NAME).asInstanceOf[EngineConnMetricsOperator]\n var end = false\n var rowBefore = 1\n while (!end || rowBefore > 0){\n if(onceJob.isCompleted) {\n end = true\n metricOperator = null\n }\n logOperator.setPageSize(100)\n Utils.tryQuietly{\n val logs = logOperator.apply()\n logs.logs.asScala.foreach( log => {\n println(log)\n })\n rowBefore = logs.logs.size\n }\n Thread.sleep(3000)\n Option(metricOperator).foreach( operator => {\n if (!onceJob.isCompleted){\n println(s"Metric Monitor: ${operator.apply()}")\n println(s"Progress: ${progressOperator.apply()}")\n }\n })\n }\n onceJob.isCompleted\n onceJob.waitForCompleted()\n println(onceJob.getStatus)\n println(TimeUnit.SECONDS.convert(System.currentTimeMillis() - time, TimeUnit.MILLISECONDS) + "s")\n System.exit(0)\n\n\n def importJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {\n jobBuilder\n .addJobContent("sqoop.env.mapreduce.job.queuename", "queue_10")\n .addJobContent("sqoop.mode", "import")\n .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")\n .addJobContent("sqoop.args.username", "free")\n .addJobContent("sqoop.args.password", "testpwd")\n .addJobContent("sqoop.args.query", "select id as order_number, sno as time from" +\n " exchangis where sno =1 and $CONDITIONS")\n .addJobContent("sqoop.args.hcatalog.database", "freedb")\n .addJobContent("sqoop.args.hcatalog.table", "zy_test")\n .addJobContent("sqoop.args.hcatalog.partition.keys", "month")\n .addJobContent("sqoop.args.hcatalog.partition.values", "3")\n .addJobContent("sqoop.args.num.mappers", "1")\n .build()\n }\n\n def exportJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {\n jobBuilder\n .addJobContent("sqoop.env.mapreduce.job.queuename", "queue1")\n 
.addJobContent("sqoop.mode", "import")\n .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")\n .addJobContent("sqoop.args.query", "select id as order, sno as great_time from" +\n " exchangis_table where sno =1 and $CONDITIONS")\n .addJobContent("sqoop.args.hcatalog.database", "hadoop")\n .addJobContent("sqoop.args.hcatalog.table", "partition_33")\n .addJobContent("sqoop.args.hcatalog.partition.keys", "month")\n .addJobContent("sqoop.args.hcatalog.partition.values", "4")\n .addJobContent("sqoop.args.num.mappers", "1")\n .build()\n }\n')),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Parameter Comparison table (with native parameters):**")),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"sqoop.env.mapreduce.job.queuename<=>-Dmapreduce.job.queuename\nsqoop.args.connection.manager<===>--connection-manager\nsqoop.args.connection.param.file<===>--connection-param-file\nsqoop.args.driver<===>--driver\nsqoop.args.hadoop.home<===>--hadoop-home\nsqoop.args.hadoop.mapred.home<===>--hadoop-mapred-home\nsqoop.args.help<===>help\nsqoop.args.password<===>--password\nsqoop.args.password.alias<===>--password-alias\nsqoop.args.password.file<===>--password-file\nsqoop.args.relaxed.isolation<===>--relaxed-isolation\nsqoop.args.skip.dist.cache<===>--skip-dist-cache\nsqoop.args.username<===>--username\nsqoop.args.verbose<===>--verbose\nsqoop.args.append<===>--append\nsqoop.args.as.avrodatafile<===>--as-avrodatafile\nsqoop.args.as.parquetfile<===>--as-parquetfile\nsqoop.args.as.sequencefile<===>--as-sequencefile\nsqoop.args.as.textfile<===>--as-textfile\nsqoop.args.autoreset.to.one.mapper<===>--autoreset-to-one-mapper\nsqoop.args.boundary.query<===>--boundary-query\nsqoop.args.case.insensitive<===>--case-insensitive\nsqoop.args.columns<===>--columns\nsqoop.args.compression.codec<===>--compression-codec\nsqoop.args.delete.target.dir<===>--delete-target-dir\nsqoop.args.direct<===>--direct\nsqoop.args.direct.split.size<===>--direct-split-size\nsqoop.args.query<===>--query\nsqoop.args.fetch.size<===>--fetch-size\nsqoop.args.inline.lob.limit<===>--inline-lob-limit\nsqoop.args.num.mappers<===>--num-mappers\nsqoop.args.mapreduce.job.name<===>--mapreduce-job-name\nsqoop.args.merge.key<===>--merge-key\nsqoop.args.split.by<===>--split-by\nsqoop.args.table<===>--table\nsqoop.args.target.dir<===>--target-dir\nsqoop.args.validate<===>--validate\nsqoop.args.validation.failurehandler<===>--validation-failurehandler\nsqoop.args.validation.threshold<===> 
--validation-threshold\nsqoop.args.validator<===>--validator\nsqoop.args.warehouse.dir<===>--warehouse-dir\nsqoop.args.where<===>--where\nsqoop.args.compress<===>--compress\nsqoop.args.check.column<===>--check-column\nsqoop.args.incremental<===>--incremental\nsqoop.args.last.value<===>--last-value\nsqoop.args.enclosed.by<===>--enclosed-by\nsqoop.args.escaped.by<===>--escaped-by\nsqoop.args.fields.terminated.by<===>--fields-terminated-by\nsqoop.args.lines.terminated.by<===>--lines-terminated-by\nsqoop.args.mysql.delimiters<===>--mysql-delimiters\nsqoop.args.optionally.enclosed.by<===>--optionally-enclosed-by\nsqoop.args.input.enclosed.by<===>--input-enclosed-by\nsqoop.args.input.escaped.by<===>--input-escaped-by\nsqoop.args.input.fields.terminated.by<===>--input-fields-terminated-by\nsqoop.args.input.lines.terminated.by<===>--input-lines-terminated-by\nsqoop.args.input.optionally.enclosed.by<===>--input-optionally-enclosed-by\nsqoop.args.create.hive.table<===>--create-hive-table\nsqoop.args.hive.delims.replacement<===>--hive-delims-replacement\nsqoop.args.hive.database<===>--hive-database\nsqoop.args.hive.drop.import.delims<===>--hive-drop-import-delims\nsqoop.args.hive.home<===>--hive-home\nsqoop.args.hive.import<===>--hive-import\nsqoop.args.hive.overwrite<===>--hive-overwrite\nsqoop.args.hive.partition.value<===>--hive-partition-value\nsqoop.args.hive.table<===>--hive-table\nsqoop.args.column.family<===>--column-family\nsqoop.args.hbase.bulkload<===>--hbase-bulkload\nsqoop.args.hbase.create.table<===>--hbase-create-table\nsqoop.args.hbase.row.key<===>--hbase-row-key\nsqoop.args.hbase.table<===>--hbase-table\nsqoop.args.hcatalog.database<===>--hcatalog-database\nsqoop.args.hcatalog.home<===>--hcatalog-home\nsqoop.args.hcatalog.partition.keys<===>--hcatalog-partition-keys\nsqoop.args.hcatalog.partition.values<===>--hcatalog-partition-values\nsqoop.args.hcatalog.table<===>--hcatalog-table\nsqoop.args.hive.partition.key<===>--hive-partition-key\nsqoop.args.map.column.hive<===>--map-column-hive\nsqoop.args.create.hcatalog.table<===>--create-hcatalog-table\nsqoop.args.hcatalog.storage.stanza<===>--hcatalog-storage-stanza\nsqoop.args.accumulo.batch.size<===>--accumulo-batch-size\nsqoop.args.accumulo.column.family<===>--accumulo-column-family\nsqoop.args.accumulo.create.table<===>--accumulo-create-table\nsqoop.args.accumulo.instance<===>--accumulo-instance\nsqoop.args.accumulo.max.latency<===>--accumulo-max-latency\nsqoop.args.accumulo.password<===>--accumulo-password\nsqoop.args.accumulo.row.key<===>--accumulo-row-key\nsqoop.args.accumulo.table<===>--accumulo-table\nsqoop.args.accumulo.user<===>--accumulo-user\nsqoop.args.accumulo.visibility<===>--accumulo-visibility\nsqoop.args.accumulo.zookeepers<===>--accumulo-zookeepers\nsqoop.args.bindir<===>--bindir\nsqoop.args.class.name<===>--class-name\nsqoop.args.input.null.non.string<===>--input-null-non-string\nsqoop.args.input.null.string<===>--input-null-string\nsqoop.args.jar.file<===>--jar-file\nsqoop.args.map.column.java<===>--map-column-java\nsqoop.args.null.non.string<===>--null-non-string\nsqoop.args.null.string<===>--null-string\nsqoop.args.outdir<===>--outdir\nsqoop.args.package.name<===>--package-name\nsqoop.args.conf<===>-conf\nsqoop.args.D<===>-D\nsqoop.args.fs<===>-fs\nsqoop.args.jt<===>-jt\nsqoop.args.files<===>-files\nsqoop.args.libjars<===>-libjars\nsqoop.args.archives<===>-archives\nsqoop.args.update.key<===>--update-key\nsqoop.args.update.mode<===>--update-mode\nsqoop.args.export.dir<===>--export-dir\n")))}g.isMDXComponent=!0}}]); 
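/* Editor's note: the parameter comparison table above maps almost every native Sqoop flag to a "sqoop.args.*" job-content key by stripping the leading dashes and replacing "-" with "." (the queue setting is the exception, using the "sqoop.env." prefix, and "help" carries no dashes). A minimal JavaScript sketch of that naming rule follows; the function name and standalone-script framing are illustrative, not part of the Linkis codebase. */
// Convert a native Sqoop flag such as "--hcatalog-database" into the
// corresponding Linkis job-content key, e.g. "sqoop.args.hcatalog.database".
function toJobContentKey(nativeFlag) {
  return "sqoop.args." + nativeFlag.replace(/^-+/, "").replace(/-/g, ".");
}
// Usage: toJobContentKey("--num-mappers") returns "sqoop.args.num.mappers",
// matching the "sqoop.args.num.mappers<===>--num-mappers" row in the table above.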
\ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[46750],{3905:function(e,n,o){o.d(n,{Zo:function(){return c},kt:function(){return u}});var a=o(67294);function t(e,n,o){return n in e?Object.defineProperty(e,n,{value:o,enumerable:!0,configurable:!0,writable:!0}):e[n]=o,e}function r(e,n){var o=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),o.push.apply(o,a)}return o}function i(e){for(var n=1;n=0||(t[o]=e[o]);return t}(e,n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,o)&&(t[o]=e[o])}return t}var l=a.createContext({}),p=function(e){var n=a.useContext(l),o=n;return e&&(o="function"==typeof e?e(n):i(i({},n),e)),o},c=function(e){var n=p(e.components);return a.createElement(l.Provider,{value:n},e.children)},d={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},g=a.forwardRef((function(e,n){var o=e.components,t=e.mdxType,r=e.originalType,l=e.parentName,c=s(e,["components","mdxType","originalType","parentName"]),g=p(o),u=t,m=g["".concat(l,".").concat(u)]||g[u]||d[u]||r;return o?a.createElement(m,i(i({ref:n},c),{},{components:o})):a.createElement(m,i({ref:n},c))}));function u(e,n){var o=arguments,t=n&&n.mdxType;if("string"==typeof e||t){var r=o.length,i=new Array(r);i[0]=g;var s={};for(var l in n)hasOwnProperty.call(n,l)&&(s[l]=n[l]);s.originalType=e,s.mdxType="string"==typeof e?e:t,i[1]=s;for(var p=2;p\n org.apache.linkis\n linkis-computation-client\n ${linkis.version}\n\n")),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Test Case\uff1a")),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-scala"},'\npackage com.webank.wedatasphere.exchangis.job.server.log.client\n\nimport java.util.concurrent.TimeUnit\n\nimport java.util\n\nimport org.apache.linkis.computation.client.LinkisJobBuilder\nimport org.apache.linkis.computation.client.once.simple.{SimpleOnceJob, SimpleOnceJobBuilder, SubmittableSimpleOnceJob}\nimport org.apache.linkis.computation.client.operator.impl.{EngineConnLogOperator, EngineConnMetricsOperator, EngineConnProgressOperator}\nimport org.apache.linkis.computation.client.utils.LabelKeyUtils\n\nimport scala.collection.JavaConverters._\n\nobject SqoopOnceJobTest extends App {\n LinkisJobBuilder.setDefaultServerUrl("http://127.0.0.1:9001")\n val logPath = "C:\\\\Users\\\\resources\\\\log4j.properties"\n System.setProperty("log4j.configurationFile", logPath)\n val startUpMap = new util.HashMap[String, Any]\n startUpMap.put("wds.linkis.engineconn.java.driver.memory", "1g")\n val builder = SimpleOnceJob.builder().setCreateService("Linkis-Client")\n .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "sqoop-1.4.6")\n .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "Client")\n .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")\n .setStartupParams(startUpMap)\n .setMaxSubmitTime(30000)\n .addExecuteUser("freeuser")\n val onceJob = importJob(builder)\n val time = System.currentTimeMillis()\n onceJob.submit()\n println(onceJob.getId)\n val logOperator = onceJob.getOperator(EngineConnLogOperator.OPERATOR_NAME).asInstanceOf[EngineConnLogOperator]\n println(onceJob.getECMServiceInstance)\n logOperator.setFromLine(0)\n logOperator.setECMServiceInstance(onceJob.getECMServiceInstance)\n logOperator.setEngineConnType("sqoop")\n 
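// Editor's comment (added for clarity): setIgnoreKeywords below filters log lines from noisy logger threads, and setPageSize(100) in the loop that follows tails the EngineConn log in pages of 100 lines until the job completes\n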
logOperator.setIgnoreKeywords("[main],[SpringContextShutdownHook]")\n var progressOperator = onceJob.getOperator(EngineConnProgressOperator.OPERATOR_NAME).asInstanceOf[EngineConnProgressOperator]\n var metricOperator = onceJob.getOperator(EngineConnMetricsOperator.OPERATOR_NAME).asInstanceOf[EngineConnMetricsOperator]\n var end = false\n var rowBefore = 1\n while (!end || rowBefore > 0){\n if(onceJob.isCompleted) {\n end = true\n metricOperator = null\n }\n logOperator.setPageSize(100)\n Utils.tryQuietly{\n val logs = logOperator.apply()\n logs.logs.asScala.foreach( log => {\n println(log)\n })\n rowBefore = logs.logs.size\n }\n Thread.sleep(3000)\n Option(metricOperator).foreach( operator => {\n if (!onceJob.isCompleted){\n println(s"Metric Monitor: ${operator.apply()}")\n println(s"Progress: ${progressOperator.apply()}")\n }\n })\n }\n onceJob.isCompleted\n onceJob.waitForCompleted()\n println(onceJob.getStatus)\n println(TimeUnit.SECONDS.convert(System.currentTimeMillis() - time, TimeUnit.MILLISECONDS) + "s")\n System.exit(0)\n\n\n def importJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {\n jobBuilder\n .addJobContent("sqoop.env.mapreduce.job.queuename", "queue_10")\n .addJobContent("sqoop.mode", "import")\n .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")\n .addJobContent("sqoop.args.username", "free")\n .addJobContent("sqoop.args.password", "testpwd")\n .addJobContent("sqoop.args.query", "select id as order_number, sno as time from" +\n " exchangis where sno =1 and $CONDITIONS")\n .addJobContent("sqoop.args.hcatalog.database", "freedb")\n .addJobContent("sqoop.args.hcatalog.table", "zy_test")\n .addJobContent("sqoop.args.hcatalog.partition.keys", "month")\n .addJobContent("sqoop.args.hcatalog.partition.values", "3")\n .addJobContent("sqoop.args.num.mappers", "1")\n .build()\n }\n\n def exportJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {\n jobBuilder\n .addJobContent("sqoop.env.mapreduce.job.queuename", "queue1")\n .addJobContent("sqoop.mode", "import")\n .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")\n .addJobContent("sqoop.args.query", "select id as order, sno as great_time from" +\n " exchangis_table where sno =1 and $CONDITIONS")\n .addJobContent("sqoop.args.hcatalog.database", "hadoop")\n .addJobContent("sqoop.args.hcatalog.table", "partition_33")\n .addJobContent("sqoop.args.hcatalog.partition.keys", "month")\n .addJobContent("sqoop.args.hcatalog.partition.values", "4")\n .addJobContent("sqoop.args.num.mappers", "1")\n .build()\n }\n')),(0,r.kt)("p",null,(0,r.kt)("strong",{parentName:"p"},"Parameter Comparison table (with native 
parameters):**")),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"},"sqoop.env.mapreduce.job.queuename<=>-Dmapreduce.job.queuename\nsqoop.args.connection.manager<===>--connection-manager\nsqoop.args.connection.param.file<===>--connection-param-file\nsqoop.args.driver<===>--driver\nsqoop.args.hadoop.home<===>--hadoop-home\nsqoop.args.hadoop.mapred.home<===>--hadoop-mapred-home\nsqoop.args.help<===>help\nsqoop.args.password<===>--password\nsqoop.args.password.alias<===>--password-alias\nsqoop.args.password.file<===>--password-file\nsqoop.args.relaxed.isolation<===>--relaxed-isolation\nsqoop.args.skip.dist.cache<===>--skip-dist-cache\nsqoop.args.username<===>--username\nsqoop.args.verbose<===>--verbose\nsqoop.args.append<===>--append\nsqoop.args.as.avrodatafile<===>--as-avrodatafile\nsqoop.args.as.parquetfile<===>--as-parquetfile\nsqoop.args.as.sequencefile<===>--as-sequencefile\nsqoop.args.as.textfile<===>--as-textfile\nsqoop.args.autoreset.to.one.mapper<===>--autoreset-to-one-mapper\nsqoop.args.boundary.query<===>--boundary-query\nsqoop.args.case.insensitive<===>--case-insensitive\nsqoop.args.columns<===>--columns\nsqoop.args.compression.codec<===>--compression-codec\nsqoop.args.delete.target.dir<===>--delete-target-dir\nsqoop.args.direct<===>--direct\nsqoop.args.direct.split.size<===>--direct-split-size\nsqoop.args.query<===>--query\nsqoop.args.fetch.size<===>--fetch-size\nsqoop.args.inline.lob.limit<===>--inline-lob-limit\nsqoop.args.num.mappers<===>--num-mappers\nsqoop.args.mapreduce.job.name<===>--mapreduce-job-name\nsqoop.args.merge.key<===>--merge-key\nsqoop.args.split.by<===>--split-by\nsqoop.args.table<===>--table\nsqoop.args.target.dir<===>--target-dir\nsqoop.args.validate<===>--validate\nsqoop.args.validation.failurehandler<===>--validation-failurehandler\nsqoop.args.validation.threshold<===> 
--validation-threshold\nsqoop.args.validator<===>--validator\nsqoop.args.warehouse.dir<===>--warehouse-dir\nsqoop.args.where<===>--where\nsqoop.args.compress<===>--compress\nsqoop.args.check.column<===>--check-column\nsqoop.args.incremental<===>--incremental\nsqoop.args.last.value<===>--last-value\nsqoop.args.enclosed.by<===>--enclosed-by\nsqoop.args.escaped.by<===>--escaped-by\nsqoop.args.fields.terminated.by<===>--fields-terminated-by\nsqoop.args.lines.terminated.by<===>--lines-terminated-by\nsqoop.args.mysql.delimiters<===>--mysql-delimiters\nsqoop.args.optionally.enclosed.by<===>--optionally-enclosed-by\nsqoop.args.input.enclosed.by<===>--input-enclosed-by\nsqoop.args.input.escaped.by<===>--input-escaped-by\nsqoop.args.input.fields.terminated.by<===>--input-fields-terminated-by\nsqoop.args.input.lines.terminated.by<===>--input-lines-terminated-by\nsqoop.args.input.optionally.enclosed.by<===>--input-optionally-enclosed-by\nsqoop.args.create.hive.table<===>--create-hive-table\nsqoop.args.hive.delims.replacement<===>--hive-delims-replacement\nsqoop.args.hive.database<===>--hive-database\nsqoop.args.hive.drop.import.delims<===>--hive-drop-import-delims\nsqoop.args.hive.home<===>--hive-home\nsqoop.args.hive.import<===>--hive-import\nsqoop.args.hive.overwrite<===>--hive-overwrite\nsqoop.args.hive.partition.value<===>--hive-partition-value\nsqoop.args.hive.table<===>--hive-table\nsqoop.args.column.family<===>--column-family\nsqoop.args.hbase.bulkload<===>--hbase-bulkload\nsqoop.args.hbase.create.table<===>--hbase-create-table\nsqoop.args.hbase.row.key<===>--hbase-row-key\nsqoop.args.hbase.table<===>--hbase-table\nsqoop.args.hcatalog.database<===>--hcatalog-database\nsqoop.args.hcatalog.home<===>--hcatalog-home\nsqoop.args.hcatalog.partition.keys<===>--hcatalog-partition-keys\nsqoop.args.hcatalog.partition.values<===>--hcatalog-partition-values\nsqoop.args.hcatalog.table<===>--hcatalog-table\nsqoop.args.hive.partition.key<===>--hive-partition-key\nsqoop.args.map.column.hive<===>--map-column-hive\nsqoop.args.create.hcatalog.table<===>--create-hcatalog-table\nsqoop.args.hcatalog.storage.stanza<===>--hcatalog-storage-stanza\nsqoop.args.accumulo.batch.size<===>--accumulo-batch-size\nsqoop.args.accumulo.column.family<===>--accumulo-column-family\nsqoop.args.accumulo.create.table<===>--accumulo-create-table\nsqoop.args.accumulo.instance<===>--accumulo-instance\nsqoop.args.accumulo.max.latency<===>--accumulo-max-latency\nsqoop.args.accumulo.password<===>--accumulo-password\nsqoop.args.accumulo.row.key<===>--accumulo-row-key\nsqoop.args.accumulo.table<===>--accumulo-table\nsqoop.args.accumulo.user<===>--accumulo-user\nsqoop.args.accumulo.visibility<===>--accumulo-visibility\nsqoop.args.accumulo.zookeepers<===>--accumulo-zookeepers\nsqoop.args.bindir<===>--bindir\nsqoop.args.class.name<===>--class-name\nsqoop.args.input.null.non.string<===>--input-null-non-string\nsqoop.args.input.null.string<===>--input-null-string\nsqoop.args.jar.file<===>--jar-file\nsqoop.args.map.column.java<===>--map-column-java\nsqoop.args.null.non.string<===>--null-non-string\nsqoop.args.null.string<===>--null-string\nsqoop.args.outdir<===>--outdir\nsqoop.args.package.name<===>--package-name\nsqoop.args.conf<===>-conf\nsqoop.args.D<===>-D\nsqoop.args.fs<===>-fs\nsqoop.args.jt<===>-jt\nsqoop.args.files<===>-files\nsqoop.args.libjars<===>-libjars\nsqoop.args.archives<===>-archives\nsqoop.args.update.key<===>--update-key\nsqoop.args.update.mode<===>--update-mode\nsqoop.args.export.dir<===>--export-dir\n")))}g.isMDXComponent=!0}}]); 
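/* Editor's note: section 1.2 of the pipeline engine doc above describes refreshing engine materials by POSTing {"method": "/enginePlugin/engineConn/refreshAll"} to the rpc/receiveAndReply interface. Below is a minimal JavaScript sketch of that call, assuming Node 18+ (global fetch) and reusing the http://127.0.0.1:9001 example address from the Sqoop test case; the sessionCookie parameter is a placeholder for a cookie obtained from the Linkis login API, and nothing here is an official client. */
async function refreshEngineMaterials(sessionCookie) {
  // Endpoint and request body are taken verbatim from section 1.2 above.
  const res = await fetch("http://127.0.0.1:9001/api/rest_j/v1/rpc/receiveAndReply", {
    method: "POST",
    headers: { "Content-Type": "application/json", Cookie: sessionCookie },
    body: JSON.stringify({ method: "/enginePlugin/engineConn/refreshAll" }),
  });
  // On success, verify last_update_time in linkis_cg_engine_conn_plugin_bml_resources as described above.
  console.log(res.status, await res.text());
}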
\ No newline at end of file diff --git a/assets/js/935f2afb.cabe5c87.js b/assets/js/935f2afb.23dd1aac.js similarity index 99% rename from assets/js/935f2afb.cabe5c87.js rename to assets/js/935f2afb.23dd1aac.js index 9b53d21f3a3..ce8d0d1d628 100644 --- a/assets/js/935f2afb.cabe5c87.js +++ b/assets/js/935f2afb.23dd1aac.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[80053],{1109:function(e){e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next(1.1.3)","banner":"unreleased","badge":true,"className":"docs-version-current","isLast":false,"docsSidebars":{"tutorialSidebar":[{"type":"link","label":"Introduction","href":"/docs/1.1.3/introduction"},{"type":"link","label":"Version overview","href":"/docs/1.1.3/release"},{"type":"link","label":"Release Notes 1.1.3-RC1","href":"/docs/1.1.3/release-notes-1.1.3"},{"type":"category","label":"Deployment","items":[{"type":"link","label":"Quick Deployment","href":"/docs/1.1.3/deployment/quick_deploy"},{"type":"link","label":"Cluster Deployment","href":"/docs/1.1.3/deployment/cluster_deployment"},{"type":"link","label":"EngineConnPlugin Installation","href":"/docs/1.1.3/deployment/engine_conn_plugin_installation"},{"type":"link","label":"Installation Directory Structure","href":"/docs/1.1.3/deployment/installation_hierarchical_structure"},{"type":"link","label":"installation package directory structure","href":"/docs/1.1.3/deployment/unpack_hierarchical_structure"},{"type":"link","label":"Source Code Directory Structure","href":"/docs/1.1.3/deployment/sourcecode_hierarchical_structure"},{"type":"link","label":"Linkis Console Deployment","href":"/docs/1.1.3/deployment/web_install"},{"type":"link","label":"Involve SkyWaling into Linkis","href":"/docs/1.1.3/deployment/involve_skywalking_into_linkis"},{"type":"link","label":"DataSource","href":"/docs/1.1.3/deployment/start_metadatasource"},{"type":"link","label":"Deploy Linkis without HDFS","href":"/docs/1.1.3/deployment/deploy_linkis_without_hdfs"},{"type":"link","label":"Installation and deployment of the tool scriptis","href":"/docs/1.1.3/deployment/linkis_scriptis_install"},{"type":"link","label":"Involve Prometheus into Linkis","href":"/docs/1.1.3/deployment/involve_prometheus_into_linkis"},{"type":"link","label":"Involve Knife4j into Linkis","href":"/docs/1.1.3/deployment/involve_knife4j_into_linkis"}],"collapsed":true,"collapsible":true},{"type":"category","label":"User Guide","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/user_guide/overview"},{"type":"link","label":"How to Use","href":"/docs/1.1.3/user_guide/how_to_use"},{"type":"link","label":"JAVA SDK Manual","href":"/docs/1.1.3/user_guide/sdk_manual"},{"type":"link","label":"Use of UDFs","href":"/docs/1.1.3/user_guide/udf"},{"type":"link","label":"Linkis-Cli Manual","href":"/docs/1.1.3/user_guide/linkiscli_manual"},{"type":"link","label":"Console User Manual","href":"/docs/1.1.3/user_guide/console_manual"},{"type":"link","label":"DataSource Client SDK","href":"/docs/1.1.3/user_guide/linkis-datasource-client"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Engine Usage","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/engine_usage/overview"},{"type":"link","label":"Hive Engine Usage","href":"/docs/1.1.3/engine_usage/hive"},{"type":"link","label":"JDBC Engine Usage","href":"/docs/1.1.3/engine_usage/jdbc"},{"type":"link","label":"Python Engine 
Usage","href":"/docs/1.1.3/engine_usage/python"},{"type":"link","label":"Shell Engine Usage","href":"/docs/1.1.3/engine_usage/shell"},{"type":"link","label":"Spark Engine Usage","href":"/docs/1.1.3/engine_usage/spark"},{"type":"link","label":"Flink Engine Usage","href":"/docs/1.1.3/engine_usage/flink"},{"type":"link","label":"OpenLookEng Engine","href":"/docs/1.1.3/engine_usage/openlookeng"},{"type":"link","label":"Sqoop Engine","href":"/docs/1.1.3/engine_usage/sqoop"},{"type":"link","label":"pipeline engine","href":"/docs/1.1.3/engine_usage/pipeline"}],"collapsed":true,"collapsible":true},{"type":"category","label":"API Docs","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/api/overview"},{"type":"link","label":"Login Api","href":"/docs/1.1.3/api/login_api"},{"type":"link","label":"Task Submission and Execution Rest Api","href":"/docs/1.1.3/api/linkis_task_operator"},{"type":"link","label":"Task Submission And Execution Of JDBC API","href":"/docs/1.1.3/api/jdbc_api"},{"type":"category","label":"Http API","items":[{"type":"category","label":"Public Service","items":[{"type":"link","label":"History Job Interface","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api"},{"type":"link","label":"Ceneric Api","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api"},{"type":"link","label":"UDF Operations Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api"},{"type":"link","label":"BMLFS Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api"},{"type":"link","label":"Linkis Error Codes","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code"},{"type":"link","label":"Mdq Table Interface","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api"},{"type":"link","label":"Add Global Variable","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api"},{"type":"link","label":"Parameter Configuration","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api"},{"type":"link","label":"Instance Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api"},{"type":"link","label":"Filesystem","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api"},{"type":"link","label":"Admin Console Home Page Interface","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api"},{"type":"link","label":"BM Project Operation Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api"},{"type":"link","label":"BML Resource Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api"},{"type":"link","label":"DataSourceAdminRestfulApi","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api"},{"type":"link","label":"MetadataCoreRestful","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"LinkisManger Services","items":[{"type":"link","label":"EC Resource Information Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api"},{"type":"link","label":"ECM Resource Information Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api"},{"type":"link","label":"Engine Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api"},{"type":"link","label":"Resource 
Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Context Service","items":[{"type":"link","label":"Context History Service","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api"},{"type":"link","label":"Context API","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api"},{"type":"link","label":"Context Listening Service","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api"},{"type":"link","label":"Context Logging Service","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Engine Plugin Management Service","items":[{"type":"link","label":"Engine Material Refresh Interface","href":"/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh"},{"type":"link","label":"Engine Plugin Api","href":"/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Entrance Service","items":[{"type":"link","label":"Task Action","href":"/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api"},{"type":"link","label":"Task Management","href":"/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api"}],"collapsed":true,"collapsible":true}],"collapsed":true,"collapsible":true}],"collapsed":true,"collapsible":true},{"type":"category","label":"Table Structure","items":[{"type":"link","label":"UDF table structure","href":"/docs/1.1.3/table/udf-table"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Architecture","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/overview"},{"type":"link","label":"Difference Between 1.0 And 0.x","href":"/docs/1.1.3/architecture/difference_between_1.0_and_0.x"},{"type":"category","label":"Commons","items":[{"type":"link","label":"Custom Variable Design","href":"/docs/1.1.3/architecture/commons/variable"},{"type":"link","label":"RPC Module","href":"/docs/1.1.3/architecture/commons/rpc"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Computation Governance Services","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/computation_governance_services/overview"},{"type":"link","label":"Entrance Architecture Design","href":"/docs/1.1.3/architecture/computation_governance_services/entrance"},{"type":"category","label":"Linkis Manager","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview"},{"type":"link","label":"App Manager","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager"},{"type":"link","label":"Label Manager","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager"},{"type":"link","label":"Resource Manager","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Engine","items":[{"type":"link","label":"EngineConn Design","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn"},{"type":"link","label":"EngineConnManager Design","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager"},{"type":"link","label":"EngineConnPlugin (ECP) 
Design","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin"},{"type":"link","label":"Start engineConn","href":"/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn"},{"type":"link","label":"EngineConn History Features","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history"},{"type":"link","label":"EngineConn Metrics reporting feature","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics"}],"collapsed":true,"collapsible":true},{"type":"link","label":"Job Submission","href":"/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process"},{"type":"link","label":"Linkis-Client Architecture Design","href":"/docs/1.1.3/architecture/computation_governance_services/linkis-cli"},{"type":"link","label":"Proxy User Mode","href":"/docs/1.1.3/architecture/computation_governance_services/proxy_user"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Public Enhancement Services","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/public_enhancement_services/overview"},{"type":"link","label":"Public Service","href":"/docs/1.1.3/architecture/public_enhancement_services/public_service"},{"type":"category","label":"BML","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/public_enhancement_services/bml/overview"},{"type":"link","label":"Analysis of engin BML","href":"/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Context Service","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/overview"},{"type":"link","label":"CS Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service"},{"type":"link","label":"CS Cache Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache"},{"type":"link","label":"CS Client Design","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client"},{"type":"link","label":"CS HA Design","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable"},{"type":"link","label":"CS Listener Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener"},{"type":"link","label":"CS Persistence Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence"},{"type":"link","label":"CS Search Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search"},{"type":"link","label":"CS Cleanup Interface Features","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup"}],"collapsed":true,"collapsible":true},{"type":"link","label":"Data Source Management Service Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/datasource_manager"},{"type":"link","label":"Data Source Management Service Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/metadata_manager"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Microservice Governance 
Services","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/microservice_governance_services/overview"},{"type":"link","label":"Gateway Design","href":"/docs/1.1.3/architecture/microservice_governance_services/gateway"}],"collapsed":true,"collapsible":true}],"collapsed":true,"collapsible":true},{"type":"category","label":"Development Doc","items":[{"type":"link","label":"Compile And Package","href":"/docs/1.1.3/development/linkis_compile_and_package"},{"type":"link","label":"Introduction to Linkis Configuration Parameters","href":"/docs/1.1.3/development/linkis_config"},{"type":"link","label":"Linkis Debug","href":"/docs/1.1.3/development/linkis_debug"},{"type":"link","label":"Linkis Debug In Mac","href":"/docs/1.1.3/development/linkis_debug_in_mac"},{"type":"link","label":"How To Quickly Implement A New Engine","href":"/docs/1.1.3/development/new_engine_conn"},{"type":"link","label":"Linkis Console Compile","href":"/docs/1.1.3/development/web_build"},{"type":"link","label":"Swwager Annotation Instructions","href":"/docs/1.1.3/development/swwager_instructions"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Upgrade Guide","items":[{"type":"link","label":"Upgrade From 0.X To 1.0 Guide","href":"/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide"},{"type":"link","label":"Version upgrades above 1.0.3","href":"/docs/1.1.3/upgrade/upgrade_guide"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Tuning And Troubleshooting","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/tuning_and_troubleshooting/overview"},{"type":"link","label":"Configurations","href":"/docs/1.1.3/tuning_and_troubleshooting/configuration"},{"type":"link","label":"Tuning","href":"/docs/1.1.3/tuning_and_troubleshooting/tuning"}],"collapsed":true,"collapsible":true}]}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[80053],{1109:function(e){e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next(1.1.3)","banner":"unreleased","badge":true,"className":"docs-version-current","isLast":false,"docsSidebars":{"tutorialSidebar":[{"type":"link","label":"Introduction","href":"/docs/1.1.3/introduction"},{"type":"link","label":"Version overview","href":"/docs/1.1.3/release"},{"type":"link","label":"Release Notes 1.1.3-RC1","href":"/docs/1.1.3/release-notes-1.1.3"},{"type":"category","label":"Deployment","items":[{"type":"link","label":"Quick Deployment","href":"/docs/1.1.3/deployment/quick_deploy"},{"type":"link","label":"Cluster Deployment","href":"/docs/1.1.3/deployment/cluster_deployment"},{"type":"link","label":"EngineConnPlugin Installation","href":"/docs/1.1.3/deployment/engine_conn_plugin_installation"},{"type":"link","label":"Installation Directory Structure","href":"/docs/1.1.3/deployment/installation_hierarchical_structure"},{"type":"link","label":"installation package directory structure","href":"/docs/1.1.3/deployment/unpack_hierarchical_structure"},{"type":"link","label":"Source Code Directory Structure","href":"/docs/1.1.3/deployment/sourcecode_hierarchical_structure"},{"type":"link","label":"Linkis Console Deployment","href":"/docs/1.1.3/deployment/web_install"},{"type":"link","label":"Involve SkyWaling into Linkis","href":"/docs/1.1.3/deployment/involve_skywalking_into_linkis"},{"type":"link","label":"DataSource","href":"/docs/1.1.3/deployment/start_metadatasource"},{"type":"link","label":"Deploy Linkis without 
HDFS","href":"/docs/1.1.3/deployment/deploy_linkis_without_hdfs"},{"type":"link","label":"Installation and deployment of the tool scriptis","href":"/docs/1.1.3/deployment/linkis_scriptis_install"},{"type":"link","label":"Involve Prometheus into Linkis","href":"/docs/1.1.3/deployment/involve_prometheus_into_linkis"},{"type":"link","label":"Involve Knife4j into Linkis","href":"/docs/1.1.3/deployment/involve_knife4j_into_linkis"}],"collapsed":true,"collapsible":true},{"type":"category","label":"User Guide","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/user_guide/overview"},{"type":"link","label":"How to Use","href":"/docs/1.1.3/user_guide/how_to_use"},{"type":"link","label":"JAVA SDK Manual","href":"/docs/1.1.3/user_guide/sdk_manual"},{"type":"link","label":"Use of UDFs","href":"/docs/1.1.3/user_guide/udf"},{"type":"link","label":"Linkis-Cli Manual","href":"/docs/1.1.3/user_guide/linkiscli_manual"},{"type":"link","label":"Console User Manual","href":"/docs/1.1.3/user_guide/console_manual"},{"type":"link","label":"DataSource Client SDK","href":"/docs/1.1.3/user_guide/linkis-datasource-client"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Engine Usage","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/engine_usage/overview"},{"type":"link","label":"Hive Engine Usage","href":"/docs/1.1.3/engine_usage/hive"},{"type":"link","label":"JDBC Engine Usage","href":"/docs/1.1.3/engine_usage/jdbc"},{"type":"link","label":"Python Engine Usage","href":"/docs/1.1.3/engine_usage/python"},{"type":"link","label":"Shell Engine Usage","href":"/docs/1.1.3/engine_usage/shell"},{"type":"link","label":"Spark Engine Usage","href":"/docs/1.1.3/engine_usage/spark"},{"type":"link","label":"Flink Engine Usage","href":"/docs/1.1.3/engine_usage/flink"},{"type":"link","label":"OpenLookEng Engine","href":"/docs/1.1.3/engine_usage/openlookeng"},{"type":"link","label":"Sqoop Engine","href":"/docs/1.1.3/engine_usage/sqoop"},{"type":"link","label":"Pipeline Engine","href":"/docs/1.1.3/engine_usage/pipeline"}],"collapsed":true,"collapsible":true},{"type":"category","label":"API Docs","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/api/overview"},{"type":"link","label":"Login Api","href":"/docs/1.1.3/api/login_api"},{"type":"link","label":"Task Submission and Execution Rest Api","href":"/docs/1.1.3/api/linkis_task_operator"},{"type":"link","label":"Task Submission And Execution Of JDBC API","href":"/docs/1.1.3/api/jdbc_api"},{"type":"category","label":"Http API","items":[{"type":"category","label":"Public Service","items":[{"type":"link","label":"History Job Interface","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api"},{"type":"link","label":"Ceneric Api","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api"},{"type":"link","label":"UDF Operations Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api"},{"type":"link","label":"BMLFS Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api"},{"type":"link","label":"Linkis Error Codes","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code"},{"type":"link","label":"Mdq Table Interface","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api"},{"type":"link","label":"Add Global Variable","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api"},{"type":"link","label":"Parameter 
Configuration","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api"},{"type":"link","label":"Instance Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api"},{"type":"link","label":"Filesystem","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api"},{"type":"link","label":"Admin Console Home Page Interface","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api"},{"type":"link","label":"BM Project Operation Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api"},{"type":"link","label":"BML Resource Management","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api"},{"type":"link","label":"DataSourceAdminRestfulApi","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api"},{"type":"link","label":"MetadataCoreRestful","href":"/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"LinkisManger Services","items":[{"type":"link","label":"EC Resource Information Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api"},{"type":"link","label":"ECM Resource Information Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api"},{"type":"link","label":"Engine Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api"},{"type":"link","label":"Resource Management","href":"/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Context Service","items":[{"type":"link","label":"Context History Service","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api"},{"type":"link","label":"Context API","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api"},{"type":"link","label":"Context Listening Service","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api"},{"type":"link","label":"Context Logging Service","href":"/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Engine Plugin Management Service","items":[{"type":"link","label":"Engine Material Refresh Interface","href":"/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh"},{"type":"link","label":"Engine Plugin Api","href":"/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Entrance Service","items":[{"type":"link","label":"Task Action","href":"/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api"},{"type":"link","label":"Task Management","href":"/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api"}],"collapsed":true,"collapsible":true}],"collapsed":true,"collapsible":true}],"collapsed":true,"collapsible":true},{"type":"category","label":"Table Structure","items":[{"type":"link","label":"UDF table structure","href":"/docs/1.1.3/table/udf-table"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Architecture","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/overview"},{"type":"link","label":"Difference Between 1.0 And 0.x","href":"/docs/1.1.3/architecture/difference_between_1.0_and_0.x"},{"type":"category","label":"Commons","items":[{"type":"link","label":"Custom 
Variable Design","href":"/docs/1.1.3/architecture/commons/variable"},{"type":"link","label":"RPC Module","href":"/docs/1.1.3/architecture/commons/rpc"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Computation Governance Services","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/computation_governance_services/overview"},{"type":"link","label":"Entrance Architecture Design","href":"/docs/1.1.3/architecture/computation_governance_services/entrance"},{"type":"category","label":"Linkis Manager","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview"},{"type":"link","label":"App Manager","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager"},{"type":"link","label":"Label Manager","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager"},{"type":"link","label":"Resource Manager","href":"/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Engine","items":[{"type":"link","label":"EngineConn Design","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn"},{"type":"link","label":"EngineConnManager Design","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager"},{"type":"link","label":"EngineConnPlugin (ECP) Design","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin"},{"type":"link","label":"Start engineConn","href":"/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn"},{"type":"link","label":"EngineConn History Features","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history"},{"type":"link","label":"EngineConn Metrics reporting feature","href":"/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics"}],"collapsed":true,"collapsible":true},{"type":"link","label":"Job Submission","href":"/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process"},{"type":"link","label":"Linkis-Client Architecture Design","href":"/docs/1.1.3/architecture/computation_governance_services/linkis-cli"},{"type":"link","label":"Proxy User Mode","href":"/docs/1.1.3/architecture/computation_governance_services/proxy_user"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Public Enhancement Services","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/public_enhancement_services/overview"},{"type":"link","label":"Public Service","href":"/docs/1.1.3/architecture/public_enhancement_services/public_service"},{"type":"category","label":"BML","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/public_enhancement_services/bml/overview"},{"type":"link","label":"Analysis of engin BML","href":"/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Context Service","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/overview"},{"type":"link","label":"CS Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service"},{"type":"link","label":"CS Cache 
Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache"},{"type":"link","label":"CS Client Design","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client"},{"type":"link","label":"CS HA Design","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable"},{"type":"link","label":"CS Listener Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener"},{"type":"link","label":"CS Persistence Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence"},{"type":"link","label":"CS Search Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search"},{"type":"link","label":"CS Cleanup Interface Features","href":"/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup"}],"collapsed":true,"collapsible":true},{"type":"link","label":"Data Source Management Service Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/datasource_manager"},{"type":"link","label":"Data Source Management Service Architecture","href":"/docs/1.1.3/architecture/public_enhancement_services/metadata_manager"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Microservice Governance Services","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/architecture/microservice_governance_services/overview"},{"type":"link","label":"Gateway Design","href":"/docs/1.1.3/architecture/microservice_governance_services/gateway"}],"collapsed":true,"collapsible":true}],"collapsed":true,"collapsible":true},{"type":"category","label":"Development Doc","items":[{"type":"link","label":"Compile And Package","href":"/docs/1.1.3/development/linkis_compile_and_package"},{"type":"link","label":"Introduction to Linkis Configuration Parameters","href":"/docs/1.1.3/development/linkis_config"},{"type":"link","label":"Linkis Debug","href":"/docs/1.1.3/development/linkis_debug"},{"type":"link","label":"Linkis Debug In Mac","href":"/docs/1.1.3/development/linkis_debug_in_mac"},{"type":"link","label":"How To Quickly Implement A New Engine","href":"/docs/1.1.3/development/new_engine_conn"},{"type":"link","label":"Linkis Console Compile","href":"/docs/1.1.3/development/web_build"},{"type":"link","label":"Swwager Annotation Instructions","href":"/docs/1.1.3/development/swwager_instructions"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Upgrade Guide","items":[{"type":"link","label":"Upgrade From 0.X To 1.0 Guide","href":"/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide"},{"type":"link","label":"Version upgrades above 1.0.3","href":"/docs/1.1.3/upgrade/upgrade_guide"}],"collapsed":true,"collapsible":true},{"type":"category","label":"Tuning And Troubleshooting","items":[{"type":"link","label":"Overview","href":"/docs/1.1.3/tuning_and_troubleshooting/overview"},{"type":"link","label":"Configurations","href":"/docs/1.1.3/tuning_and_troubleshooting/configuration"},{"type":"link","label":"Tuning","href":"/docs/1.1.3/tuning_and_troubleshooting/tuning"}],"collapsed":true,"collapsible":true}]}}')}}]); \ No newline at end of file diff --git a/assets/js/9dd8a0d2.d5b54caf.js b/assets/js/9dd8a0d2.d5b54caf.js new file mode 100644 index 00000000000..ba5813d2241 --- /dev/null +++ b/assets/js/9dd8a0d2.d5b54caf.js @@ -0,0 +1 @@ +"use 
strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[87054,48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),r=a(72389),i=a(44996),c=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 
OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) 
to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kinds of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which makes the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transaction Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,r.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==c?void 0:c[e];return n.createElement("div",null,n.createElement("script",{src:"//cdn.matomo.cloud/apachelinkis.matomo.cloud/matomo.js"}),n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," ",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton
blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title 
text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,i.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}},66206:function(e,t,a){a.r(t),a.d(t,{default:function(){return j}});var n=a(67294);var r=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)},i="object"==typeof global&&global&&global.Object===Object&&global,c="object"==typeof self&&self&&self.Object===Object&&self,o=i||c||Function("return this")(),l=function(){return o.Date.now()},s=/\s/;var m=function(e){for(var t=e.length;t--&&s.test(e.charAt(t)););return t},d=/^\s+/;var u=function(e){return e?e.slice(0,m(e)+1).replace(d,""):e},p=o.Symbol,h=Object.prototype,g=h.hasOwnProperty,f=h.toString,b=p?p.toStringTag:void 0;var v=function(e){var t=g.call(e,b),a=e[b];try{e[b]=void 0;var n=!0}catch(i){}var r=f.call(e);return n&&(t?e[b]=a:delete e[b]),r},A=Object.prototype.toString;var y=function(e){return A.call(e)},E=p?p.toStringTag:void 0;var N=function(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":E&&E in Object(e)?v(e):y(e)};var k=function(e){return null!=e&&"object"==typeof e};var 
w=function(e){return"symbol"==typeof e||k(e)&&"[object Symbol]"==N(e)},S=/^[-+]0x[0-9a-f]+$/i,x=/^0b[01]+$/i,T=/^0o[0-7]+$/i,C=parseInt;var L=function(e){if("number"==typeof e)return e;if(w(e))return NaN;if(r(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=r(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=u(e);var a=x.test(e);return a||T.test(e)?C(e.slice(2),a?2:8):S.test(e)?NaN:+e},z=Math.max,I=Math.min;var D=function(e,t,a){var n,i,c,o,s,m,d=0,u=!1,p=!1,h=!0;if("function"!=typeof e)throw new TypeError("Expected a function");function g(t){var a=n,r=i;return n=i=void 0,d=t,o=e.apply(r,a)}function f(e){return d=e,s=setTimeout(v,t),u?g(e):o}function b(e){var a=e-m;return void 0===m||a>=t||a<0||p&&e-d>=c}function v(){var e=l();if(b(e))return A(e);s=setTimeout(v,function(e){var a=t-(e-m);return p?I(a,c-(e-d)):a}(e))}function A(e){return s=void 0,h&&n?g(e):(n=i=void 0,o)}function y(){var e=l(),a=b(e);if(n=arguments,i=this,m=e,a){if(void 0===s)return f(m);if(p)return clearTimeout(s),s=setTimeout(v,t),g(m)}return void 0===s&&(s=setTimeout(v,t)),o}return t=L(t)||0,r(a)&&(u=!!a.leading,c=(p="maxWait"in a)?z(L(a.maxWait)||0,t):c,h="trailing"in a?!!a.trailing:h),y.cancel=function(){void 0!==s&&clearTimeout(s),d=0,n=m=i=s=void 0},y.flush=function(){return void 0===s?o:A(l())},y};var F=function(e,t,a){var n=!0,i=!0;if("function"!=typeof e)throw new TypeError("Expected a function");return r(a)&&(n="leading"in a?!!a.leading:n,i="trailing"in a?!!a.trailing:i),D(e,t,{leading:n,maxWait:t,trailing:i})},U=a(89276),B=a(52263),q=a(88458),R=a(72389);function j(){var e=(0,R.Z)(),t=(0,B.Z)().siteConfig,a=e&&location.pathname,r=function(){return"/"===a||"/zh-CN/"===a};return(0,n.useEffect)((function(){if(e){var t=document.getElementsByTagName("nav")[0],a=t&&t.classList;if(!a)return;r()?a.add("index-nav"):a.remove("index-nav"),window.onscroll=F((function(e){try{r()&&(e.target.scrollingElement.scrollTop>0?a.remove("index-nav"):a.add("index-nav"))}catch(t){console.warn(t)}}),150)}}),[e,a]),n.createElement(U.Z,{title:t.title,description:"Description will go into a meta tag in "},n.createElement("main",null,n.createElement(q.default,null)))}}}]); \ No newline at end of file diff --git a/assets/js/9dd8a0d2.e59e7145.js b/assets/js/9dd8a0d2.e59e7145.js deleted file mode 100644 index 225731f8da7..00000000000 --- a/assets/js/9dd8a0d2.e59e7145.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[87054,48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),r=a(72389),i=a(44996),c=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC 
\u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug 
Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which make the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transcation Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,r.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==c?void 0:c[e];return n.createElement("div",null,n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," ",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton 
blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title 
text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,i.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}},66206:function(e,t,a){a.r(t),a.d(t,{default:function(){return P}});var n=a(67294);var r=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)},i="object"==typeof global&&global&&global.Object===Object&&global,c="object"==typeof self&&self&&self.Object===Object&&self,o=i||c||Function("return this")(),l=function(){return o.Date.now()},s=/\s/;var m=function(e){for(var t=e.length;t--&&s.test(e.charAt(t)););return t},d=/^\s+/;var u=function(e){return e?e.slice(0,m(e)+1).replace(d,""):e},p=o.Symbol,h=Object.prototype,g=h.hasOwnProperty,f=h.toString,b=p?p.toStringTag:void 0;var v=function(e){var t=g.call(e,b),a=e[b];try{e[b]=void 0;var n=!0}catch(i){}var r=f.call(e);return n&&(t?e[b]=a:delete e[b]),r},A=Object.prototype.toString;var y=function(e){return A.call(e)},E=p?p.toStringTag:void 0;var N=function(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":E&&E in Object(e)?v(e):y(e)};var k=function(e){return null!=e&&"object"==typeof e};var 
w=function(e){return"symbol"==typeof e||k(e)&&"[object Symbol]"==N(e)},S=/^[-+]0x[0-9a-f]+$/i,x=/^0b[01]+$/i,T=/^0o[0-7]+$/i,C=parseInt;var L=function(e){if("number"==typeof e)return e;if(w(e))return NaN;if(r(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=r(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=u(e);var a=x.test(e);return a||T.test(e)?C(e.slice(2),a?2:8):S.test(e)?NaN:+e},z=Math.max,I=Math.min;var D=function(e,t,a){var n,i,c,o,s,m,d=0,u=!1,p=!1,h=!0;if("function"!=typeof e)throw new TypeError("Expected a function");function g(t){var a=n,r=i;return n=i=void 0,d=t,o=e.apply(r,a)}function f(e){return d=e,s=setTimeout(v,t),u?g(e):o}function b(e){var a=e-m;return void 0===m||a>=t||a<0||p&&e-d>=c}function v(){var e=l();if(b(e))return A(e);s=setTimeout(v,function(e){var a=t-(e-m);return p?I(a,c-(e-d)):a}(e))}function A(e){return s=void 0,h&&n?g(e):(n=i=void 0,o)}function y(){var e=l(),a=b(e);if(n=arguments,i=this,m=e,a){if(void 0===s)return f(m);if(p)return clearTimeout(s),s=setTimeout(v,t),g(m)}return void 0===s&&(s=setTimeout(v,t)),o}return t=L(t)||0,r(a)&&(u=!!a.leading,c=(p="maxWait"in a)?z(L(a.maxWait)||0,t):c,h="trailing"in a?!!a.trailing:h),y.cancel=function(){void 0!==s&&clearTimeout(s),d=0,n=m=i=s=void 0},y.flush=function(){return void 0===s?o:A(l())},y};var F=function(e,t,a){var n=!0,i=!0;if("function"!=typeof e)throw new TypeError("Expected a function");return r(a)&&(n="leading"in a?!!a.leading:n,i="trailing"in a?!!a.trailing:i),D(e,t,{leading:n,maxWait:t,trailing:i})},U=a(89276),B=a(52263),q=a(88458),R=a(72389);function P(){var e=(0,R.Z)(),t=(0,B.Z)().siteConfig,a=e&&location.pathname,r=function(){return"/"===a||"/zh-CN/"===a};return(0,n.useEffect)((function(){if(e){var t=document.getElementsByTagName("nav")[0],a=t&&t.classList;if(!a)return;r()?a.add("index-nav"):a.remove("index-nav"),window.onscroll=F((function(e){try{r()&&(e.target.scrollingElement.scrollTop>0?a.remove("index-nav"):a.add("index-nav"))}catch(t){console.warn(t)}}),150)}}),[e,a]),n.createElement(U.Z,{title:t.title,description:"Description will go into a meta tag in "},n.createElement("main",null,n.createElement(q.default,null)))}}}]); \ No newline at end of file diff --git a/assets/js/b96a8a04.4c65f9a0.js b/assets/js/b96a8a04.4c65f9a0.js deleted file mode 100644 index 8767fcc2da1..00000000000 --- a/assets/js/b96a8a04.4c65f9a0.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[22636],{3905:function(e,t,a){a.d(t,{Zo:function(){return u},kt:function(){return m}});var n=a(67294);function l(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(l[a]=e[a]);return l}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(l[a]=e[a])}return l}var o=n.createContext({}),s=function(e){var t=n.useContext(o),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(o.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var 
a=e.components,l=e.mdxType,r=e.originalType,o=e.parentName,u=p(e,["components","mdxType","originalType","parentName"]),d=s(a),m=l,k=d["".concat(o,".").concat(m)]||d[m]||c[m]||r;return a?n.createElement(k,i(i({ref:t},u),{},{components:a})):n.createElement(k,i({ref:t},u))}));function m(e,t){var a=arguments,l=t&&t.mdxType;if("string"==typeof e||l){var r=a.length,i=new Array(r);i[0]=d;var p={};for(var o in t)hasOwnProperty.call(t,o)&&(p[o]=t[o]);p.originalType=e,p.mdxType="string"==typeof e?e:l,i[1]=p;for(var s=2;s Key in Instruction Map Type Parameters> User Configuration> Default Configuration\n")))),(0,r.kt)("p",null,"Example:"),(0,r.kt)("p",null,"Configure engine startup parameters:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.param.conf.spark.executor.instances=3\n wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02\n")),(0,r.kt)("p",null,"Configure labelMap parameters:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.label.myLabel=label123\n")),(0,r.kt)("h4",{id:"six-output-result-set-to-file"},"Six, output result set to file"),(0,r.kt)("p",null,"Use the ",(0,r.kt)("inlineCode",{parentName:"p"},"-outPath")," parameter to specify an output directory, linkis-cli will output the result set to a file, and each result set will automatically create a file. The output format is as follows:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"}," task-[taskId]-result-[idx].txt\n \n")),(0,r.kt)("p",null,"E.g:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"}," task-906-result-1.txt\n task-906-result-2.txt\n task-906-result-3.txt\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/b96a8a04.90bd7d8c.js b/assets/js/b96a8a04.90bd7d8c.js new file mode 100644 index 00000000000..9ff03b84dd4 --- /dev/null +++ b/assets/js/b96a8a04.90bd7d8c.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[22636],{3905:function(e,t,a){a.d(t,{Zo:function(){return u},kt:function(){return m}});var n=a(67294);function l(e,t,a){return t in e?Object.defineProperty(e,t,{value:a,enumerable:!0,configurable:!0,writable:!0}):e[t]=a,e}function r(e,t){var a=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),a.push.apply(a,n)}return a}function i(e){for(var t=1;t=0||(l[a]=e[a]);return l}(e,t);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,a)&&(l[a]=e[a])}return l}var o=n.createContext({}),s=function(e){var t=n.useContext(o),a=t;return e&&(a="function"==typeof e?e(t):i(i({},t),e)),a},u=function(e){var t=s(e.components);return n.createElement(o.Provider,{value:t},e.children)},c={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var a=e.components,l=e.mdxType,r=e.originalType,o=e.parentName,u=p(e,["components","mdxType","originalType","parentName"]),d=s(a),m=l,k=d["".concat(o,".").concat(m)]||d[m]||c[m]||r;return a?n.createElement(k,i(i({ref:t},u),{},{components:a})):n.createElement(k,i({ref:t},u))}));function m(e,t){var a=arguments,l=t&&t.mdxType;if("string"==typeof e||l){var r=a.length,i=new Array(r);i[0]=d;var p={};for(var o in t)hasOwnProperty.call(t,o)&&(p[o]=t[o]);p.originalType=e,p.mdxType="string"==typeof e?e:l,i[1]=p;for(var s=2;s 
Key in Instruction Map Type Parameters> User Configuration> Default Configuration\n")))),(0,r.kt)("p",null,"Example:"),(0,r.kt)("p",null,"Configure engine startup parameters:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.param.conf.spark.executor.instances=3\n wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02\n")),(0,r.kt)("p",null,"Configure labelMap parameters:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.label.myLabel=label123\n")),(0,r.kt)("h4",{id:"six-output-result-set-to-file"},"Six, output result set to file"),(0,r.kt)("p",null,"Use the ",(0,r.kt)("inlineCode",{parentName:"p"},"-outPath")," parameter to specify an output directory, linkis-cli will output the result set to a file, and each result set will automatically create a file. The output format is as follows:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"}," task-[taskId]-result-[idx].txt\n \n")),(0,r.kt)("p",null,"E.g:"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"}," task-906-result-1.txt\n task-906-result-2.txt\n task-906-result-3.txt\n")))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.e7eb4b49.js b/assets/js/runtime~main.a4c60f50.js similarity index 91% rename from assets/js/runtime~main.e7eb4b49.js rename to assets/js/runtime~main.a4c60f50.js index 885babdfbc8..92af204a1cb 100644 --- a/assets/js/runtime~main.e7eb4b49.js +++ b/assets/js/runtime~main.a4c60f50.js @@ -1 +1 @@ -!function(){"use strict";var e,a,c,f,d,b={},t={};function n(e){var a=t[e];if(void 0!==a)return a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,n),c.loaded=!0,c.exports}n.m=b,n.c=t,e=[],n.O=function(a,c,f,d){if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(n.O).every((function(e){return n.O[e](c[r])}))?c.splice(r--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},n.n=function(e){var a=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(a,{a:a}),a},c=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},n.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);n.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((function(a){b[a]=function(){return e[a]}}));return b.default=function(){return e},n.d(d,b),d},n.d=function(e,a){for(var c in a)n.o(a,c)&&!n.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},n.f={},n.e=function(e){return Promise.all(Object.keys(n.f).reduce((function(a,c){return 
n.f[c](e,a),a}),[]))},n.u=function(e){return"assets/js/"+({328:"4cc9882f",359:"2124c49e",639:"1ef83aab",1012:"b359ab7d",1226:"32c11423",1362:"6d204534",1402:"45047182",2170:"5526e2c8",2275:"f2e7bc47",2647:"d0685248",2698:"a534d5a4",2699:"6b4f6f6d",2794:"c976da7c",2814:"68f4675e",3009:"8e27a41e",3358:"ba75a7e1",3587:"45350984",3671:"6248a31d",4065:"217deffc",4280:"781d1b70",4380:"02f6a4b8",4443:"49ee9fc2",4452:"b047bf19",4484:"a854c309",4591:"77ddc047",4736:"7e21a02f",4984:"07edeecc",5140:"c3c3ee8a",5142:"d77f29dd",5233:"f2678917",5705:"166c3354",5909:"844135d6",6288:"e59b4707",6339:"e4bc1c20",6652:"78060cbc",6959:"322e6455",7015:"33f34b53",7029:"9b73e49d",7058:"d89de855",7342:"16b1aeb6",7479:"4c92610f",7586:"3e78e8ed",7766:"af1aaf24",8301:"a67041c8",8398:"6131eab8",8605:"5be510ab",8785:"127364d6",8890:"798fb933",9235:"e9ffd44c",9409:"15a0842e",9617:"cf38eb0d",9646:"4fdf3839",9782:"55dfda34",9828:"787028e7",9991:"d3b38238",10001:"8eb4e46b",10201:"57cd18ee",10218:"7cc7c4b1",10341:"a34d501f",10388:"b14f3fa2",10499:"e428c6d2",10527:"710884a6",10828:"5917547a",10967:"216ac574",11477:"b2f554cd",11539:"77745b3d",11657:"1a566584",11713:"a7023ddc",12072:"eee10519",12431:"5b37fdc8",12561:"2670bca3",12979:"2ee9677b",13050:"5f82aa37",13107:"cd7c5b9a",13134:"eba5f9c4",13143:"04bdf1ac",13244:"0f7894ab",13479:"8a12cfa4",13703:"2ea0638b",13751:"3720c009",13755:"54f9b777",13832:"07aed5a5",13948:"f32700a0",14065:"876124f9",14272:"50aee6de",14516:"abaaa1fe",14860:"e37a6402",14910:"51c20031",15287:"333c80e1",15450:"466720ab",15665:"908165ba",16274:"8625a1ce",16511:"83d17af4",16594:"7cc92f5c",16742:"4c05f83b",16884:"c2352a99",17167:"a184b6b2",17187:"b9f50d96",17234:"bc244d90",17284:"2cf4430d",17353:"3df00f5b",17542:"66d63bfc",17625:"9a3ec700",17951:"2d364229",18139:"43dc7314",18471:"6423b631",18674:"48d82b2e",18730:"d80dfec6",18855:"9968f92c",18907:"4f9fd1aa",19001:"41664cec",19096:"856315e7",19267:"b0f3eaa6",19468:"53baf039",19881:"70b31b37",20026:"21a12340",20490:"f16124a0",20522:"b7f5bbd5",20709:"6c7c2e71",20943:"9b480441",21065:"966e982b",21405:"948b0dab",21602:"68a93d86",22215:"c2471b2c",22636:"b96a8a04",23057:"3ab15d88",23851:"c9177f39",23946:"af6f9f26",24079:"87cf41e2",24153:"280df7e5",24438:"74337923",24825:"d28aee8d",24938:"ba1b8836",24958:"c38140d4",25048:"bf275373",25077:"dff35117",25146:"cac1e9bc",25246:"4eb6e5ee",25356:"94b02a9f",25757:"8f152d3b",26074:"2b9753f8",26107:"c5c3ab65",26234:"9154a6bd",26802:"52690743",26866:"d4051e29",27307:"bef57165",27377:"6f6118a9",27598:"8837ae6a",27624:"678743b7",27657:"970236dc",27693:"8a0722c3",27872:"5771c448",27918:"17896441",27991:"dc1e40d7",28267:"7a3788d1",28441:"bd518af2",28863:"6e916c0f",29025:"91b65c41",29231:"0c159898",29514:"1be78505",29893:"9c983a1e",29898:"db672e8f",30010:"778574bb",30021:"805f29de",30765:"2afb85b6",30870:"b571d381",31034:"5e40d2f9",31105:"dee797b6",31210:"eee5032f",31460:"f464b99a",31495:"fb75c206",31503:"e4594a63",31508:"41f5a6d2",31532:"1f5d6a30",31680:"fa2f5847",31684:"09d8c3a4",32089:"4470087f",32118:"fb1218a9",32185:"51fa421a",32227:"53424860",32236:"d1513e70",32367:"c00b49ad",32523:"05b3e639",32751:"a7c1a0ec",32887:"eb1549e9",33367:"1d9261ac",33492:"1bca3249",33560:"818823b9",33590:"1431e9dc",33763:"fff7b6e8",33893:"8422caaa",34224:"37d4f123",34353:"6580ced9",34610:"eea5f367",34643:"65df3d35",34817:"f6773039",35032:"046172dc",35328:"ad76bf80",35420:"51d0de41",35456:"8e7d50a2",35514:"5ab197a5",35600:"2c3c2ea6",35693:"68d19d38",35707:"2e1d0e00",35971:"eae3663a",36093:"69bdd21e",36129:"ace962cc",36298:"3b500f01",36565:"f77a6ffd",3
6882:"76f084ae",37039:"2520d203",37259:"bf8a911c",37410:"20a79681",38399:"4d8c07c4",38659:"190c673d",38758:"311f287b",38814:"a7a0ecb6",38890:"a40db232",38933:"7aecf381",39289:"8137d071",39633:"a546ef4e",39792:"f7c1c183",39976:"0861ade5",40116:"cf5d68e3",40206:"cfdaf306",40335:"bc34ddf5",40414:"e35b48a1",40512:"5845ef18",40561:"ead3ade5",40705:"b54b617c",40968:"cd50e9d9",40992:"aabbbc7e",41099:"a63939e6",41115:"0c77509b",41772:"1137ff4c",42140:"966b40b3",42150:"d13c5bfb",42479:"d039dc3f",42553:"531ae155",42632:"1e6a2ef9",43257:"2f1aac5b",43397:"ed17fbb9",43611:"291bb016",43956:"5534efc2",43976:"17ca8484",44008:"4ea65622",44043:"85bf98de",44265:"0fba09c7",44482:"18dd72b8",44544:"1866e095",44801:"8c3e10eb",44849:"9874d022",45037:"23992941",45141:"9969e5f7",45290:"6bb68e89",45327:"26e75e35",45389:"99fb9804",45589:"80900647",45602:"76bc5640",45661:"ffa367d4",45843:"efcf4ea7",45907:"57023425",46103:"ccc49370",46402:"9bfad1fd",46750:"82c182bc",46871:"1e131061",46939:"a3ba5b60",47025:"29707690",47176:"4fc9a01a",47224:"48988e0b",47371:"73d417a5",47429:"aff75f73",47760:"d3830ad4",47871:"541d169a",47953:"b5168e69",48360:"08bd5166",48493:"6d268c49",48510:"0a85ff3c",48610:"6875c492",48932:"248e03f5",48934:"afbc56b2",48951:"1a083444",48983:"2497064c",49073:"d32b6b2b",49352:"c1b1e234",49526:"fcd50b8b",49836:"afad409e",49933:"2e786fdc",50683:"ba7181fd",50765:"42e87eeb",50801:"631037e5",50947:"e5e4671e",50996:"9db1f0be",51334:"95aa0c9c",51358:"dfd0736f",51669:"b8401e80",51969:"b5a5e0cb",52057:"ed9f1119",52066:"5ca5940e",52341:"bcf26692",52535:"814f3328",52791:"24188f33",52825:"5e082069",52989:"6a2e0576",53233:"a56c6b7a",53249:"cddd8399",53325:"1af30dc4",53414:"71662ff9",53544:"05f3e170",53550:"6e8a7928",53608:"9e4087bc",53674:"91a4177b",53981:"dbdb3f24",54086:"93ca4beb",54118:"04b1c040",54185:"c6dac06e",54202:"fd400683",54351:"b0c58f26",54827:"d02ee2fb",55212:"4e5616f0",55604:"9b55b2aa",55739:"7ffe8452",55844:"3c939a9c",55901:"437a7bc9",56054:"5f35a0de",56436:"345c38fd",56556:"cc321d97",56592:"55c09602",56637:"f430c6df",56681:"c7bda2e7",56733:"a5bc72c7",57186:"d98b6f22",57242:"56b8ac01",57520:"eebdc9c6",57626:"c0917cb8",57791:"a76d6c80",58283:"7c24e110",58284:"73f2c183",58326:"e15bcb33",58375:"38c0935c",59273:"4449d5f1",59289:"b49531e7",59310:"3cd7ddbe",59591:"3a53518b",59656:"1f29c771",59732:"f14812ff",59801:"e950a7f9",60002:"9bbf01c8",60045:"48718d21",60109:"353f3947",60195:"7d7dfbbe",60302:"c125c302",60591:"f99625e8",60851:"6e38ab13",61285:"8661c2de",61428:"a39a9928",61551:"3195a7b0",61689:"dc79b1e9",61854:"adb71217",62039:"2e0f5cec",62111:"8c4b8e49",62121:"9a0fbc46",62226:"68da338b",62316:"bfea878c",62394:"9ca7809d",62829:"cfea7194",63020:"e0d4d0dd",63058:"3411059c",63107:"021a310b",63454:"dee130d7",63576:"dd16698b",63818:"8cec74d9",64013:"01a85c17",64063:"29b7f3be",64107:"cc1d4c18",64243:"88d31c16",64416:"dd194dbe",64422:"014a5837",64519:"8518a6d3",64599:"563ab102",64873:"e32089ea",64884:"2576ff29",64985:"3096f953",65078:"d182fb80",65112:"4ffbe17f",65444:"e39b4679",65467:"7beec960",65477:"554493c6",65574:"528e29f1",65628:"3b7a3f3e",65655:"bebc3ac6",65842:"399d48da",65932:"37daacb8",66170:"933c02a1",66226:"5fa70989",66277:"1984d11b",66562:"e73f859e",66627:"c16232bc",66778:"a1463431",66954:"d9cecb84",67153:"aefd1ce5",67285:"107b70ed",67301:"28b7232a",67442:"dac925f7",67492:"c34dd313",67493:"0e9a9e55",67525:"eb3832f3",67541:"7fdbf36e",67664:"a57f4178",67910:"4509e610",67982:"8a9e1376",68045:"a0fe705b",68097:"3ddf8900",68217:"ac5779b8",68398:"e9457a88",68539:"2f338473",68616:"4b35450a",68626:"41f3d1d4",688
69:"5f5f4d9b",68952:"e323c1ba",69125:"70ee9ef1",69437:"5b5bbdd7",69468:"5c36283e",69618:"b3406135",69748:"6c4b5682",70033:"a2b6e306",70137:"9355e337",70144:"f67fe035",70525:"ba97a692",70625:"96e2cccf",70710:"25eabba4",70714:"5f92cd96",70831:"0aa128fb",70956:"8dd37400",71247:"5f7a42fa",71843:"dbca4a19",72129:"8532ad45",72498:"7ab3d102",72975:"408f120a",73028:"11295d65",73255:"fe07bdbe",73460:"089f961f",73464:"03021317",73566:"3dddbf8e",73657:"4f604ceb",74075:"fae86d7e",74095:"09b52532",74121:"55960ee5",74270:"3ec232d9",74436:"c875b05b",74549:"6c79c040",74692:"d93bf326",74783:"83bcd91d",74987:"769b7ddc",75024:"467cdcc7",75070:"3716aceb",75189:"d6b55977",75492:"240cbf48",75626:"af584b81",76306:"9f566abb",76374:"a831a863",77142:"f5c46a41",77258:"e98ff5dc",77269:"9508783d",77308:"dcbbe415",77408:"9f7b1adc",77518:"9e1c8ba3",77634:"32d2836b",77736:"3760967f",77999:"5657b1a5",78029:"77816f9e",78039:"9ac88ecc",78060:"360f41e6",78250:"b6f8819f",78273:"204b800a",78504:"c7719545",78785:"8d6cbe01",78793:"074f5eeb",79208:"02163d1c",79311:"d4bf935c",79807:"c4115680",79915:"d78d712a",80053:"935f2afb",80077:"e58ee7f4",80203:"ae644a35",80308:"441a7f95",80484:"5d613655",80576:"c2a9f04f",80732:"84e90c5f",80957:"1b338be2",81005:"fac6f2d4",81377:"c64e21de",81489:"27ca247f",81714:"d0342500",81842:"0ce26544",81926:"8f020eac",82060:"f5df6522",82241:"ead137ee",82545:"3d828cc6",82815:"9377a004",83175:"aa1e90ab",83390:"34759613",83440:"b1bf7260",83552:"e726b67f",83782:"261d0ea0",83792:"438501e2",83869:"28bf1441",83890:"28dfc6fb",84041:"ee6959ee",84128:"a09c2993",84217:"1d3c0678",84621:"34ac2676",84633:"e3bd683e",84849:"2c31ff43",84866:"22c54347",85050:"86f22513",85115:"44604fa9",85136:"8be741dd",85232:"d5927b70",85435:"0ad283e5",85449:"3fc514d2",85481:"c3ce6b05",85493:"cbc19f4b",85591:"3733e62b",85637:"96991cca",85760:"5e8c8a07",86561:"42c92bcd",86599:"117f37cd",86646:"33b6fdcc",86894:"2213fc24",87054:"9dd8a0d2",87280:"593ac3b1",87388:"0260d845",87456:"e29698a7",87464:"8aa67d88",87682:"b91032df",87709:"96c3c139",87754:"c0670030",87819:"802ad713",88187:"2c34c550",88433:"be8f9bda",88532:"755af260",88838:"9c38ddd2",89459:"4b002b59",89480:"79afda13",89670:"e4102989",89738:"c67d2a5e",89749:"8a8aa245",89778:"23b9c839",89801:"ce22cbd0",90064:"0b979966",90072:"c7ffffeb",90125:"fb16f602",90239:"25b6cbf3",90250:"36ef0f87",90371:"5aff8b89",90533:"b2b675dd",90728:"8eef043b",90814:"cff6a186",91360:"394d5a7a",91375:"02daaa8d",91610:"9d3e2903",91679:"ac7622a2",91799:"caaa2886",91951:"78c9ae28",91983:"64ba6d0f",92074:"8903e609",92200:"9ad029fd",92273:"b2171041",92290:"ff2037b4",92469:"18c1b595",92518:"94e63a1a",92519:"a94c1f1c",92706:"eb60262c",92715:"f0aa3789",92872:"0b1ac180",93042:"6cf48756",93089:"a6aa9e1f",93119:"027c2617",93171:"890e518c",93264:"2cdd8fc8",93316:"bc1274a5",93430:"dac27efb",93570:"87153e45",93586:"35ca84ad",93810:"be9aa551",93954:"7d75cf68",94073:"05a474a1",94507:"555c312a",94532:"4bcdbd8b",94551:"311a1527",94629:"caa9028b",94915:"83688337",94976:"c2340238",95159:"553f28ff",95597:"b0207dc0",95869:"7d88342b",95980:"7043a272",96170:"72790c29",96241:"0bec58d7",96476:"4ff8b690",96477:"e88d5fb1",96542:"cada9e63",96647:"4af5dc2e",96698:"e63e6ab4",96701:"72b06b07",96736:"659d5cde",97616:"306a8c6c",97915:"90e4ca75",97948:"8ddb8ae8",98049:"ef6c6ab7",98129:"38e24728",98311:"72df85c1",98701:"f25316fd",98981:"bab44dbb",99615:"d3701aa3",99784:"30778cf0",99874:"07a5f688",99923:"74e0d570",99924:"df203c0f",99928:"43220a19"}[e]||e)+"."+{328:"2474f8e0",359:"f7b4ac85",639:"638b6a46",1012:"70f1d962",1226:"ed34399c",1362:"ab7e1ec5",1
402:"5bc6edcc",2170:"ebeebe3c",2275:"6d1db94e",2647:"442da986",2698:"0730c0af",2699:"65721931",2794:"c1a68b30",2814:"a92cdaeb",3009:"21284465",3358:"df766fe2",3587:"e046259c",3671:"14e36a4d",3829:"2a47bdd2",4065:"79e088d3",4280:"2100fb18",4380:"358cf740",4443:"f54a719f",4452:"7f0d1de7",4484:"36e9e778",4591:"06d9fd0b",4736:"95203970",4984:"8df42d59",5140:"9541a33c",5142:"691faa50",5233:"ea86fea6",5705:"79ff171c",5909:"92d7ee77",6288:"1e0bf6ea",6339:"71687c00",6652:"07533954",6959:"00ccd71f",7015:"05fff3ee",7029:"8812a8be",7058:"1bffe547",7342:"ce97d43f",7479:"f5f691b4",7586:"052eb7c6",7766:"728fc0bc",8301:"59a8c6bf",8398:"e309a3c2",8605:"bfa0c905",8785:"fa277bf8",8890:"1932c262",9235:"e318bcd5",9409:"17740e18",9617:"56b2b95d",9646:"f273a2d5",9782:"03300db1",9828:"02da4631",9991:"5aeac15e",10001:"5c87cb43",10201:"f27835d2",10218:"d01b2999",10341:"9bb4f8b8",10388:"eeb9709a",10499:"5a924c44",10527:"9843479b",10828:"a4553d0d",10967:"bdc81e1e",10972:"bfd6544a",11477:"33c87ff5",11539:"92360ac1",11657:"a6240e17",11713:"cd9cef4e",12072:"2b42fa6d",12431:"df0fd6b1",12561:"acba7467",12979:"eddb07fb",13050:"1ce768ca",13107:"82ea8621",13134:"76625832",13143:"c388d623",13244:"71f6240e",13479:"45266115",13703:"e6202db6",13751:"1f2d2496",13755:"1a67012a",13832:"742c41ef",13948:"183ab949",14065:"fc08bea3",14272:"45e9070e",14516:"70bdbe7e",14860:"e0f98972",14910:"4d7f4085",15287:"5889feb5",15450:"39242097",15665:"a614478f",16274:"45d17c34",16511:"2a3bafe5",16594:"73515851",16742:"59b8b60f",16884:"3cab5ebc",17167:"3ff11a94",17187:"ecd6e604",17234:"2fc0afee",17284:"d2fdc847",17353:"3d294842",17542:"fb94b0ca",17625:"125374d0",17951:"1e958367",18139:"695ac6ad",18471:"595921ec",18674:"1855f473",18730:"3d5c8ba9",18855:"cd86d191",18907:"ae33969e",19001:"7281a5cc",19096:"739e7244",19267:"f7eb1c5e",19468:"1b41c252",19881:"a9c9b079",20026:"76eee45c",20490:"4f98e0f2",20522:"07b86566",20709:"002bb2a0",20943:"425707be",21065:"a41e193b",21405:"8cad1850",21602:"61750831",22215:"a6a37ed8",22636:"4c65f9a0",23057:"1ad7eda9",23851:"aaf0b50e",23946:"ad78fa4c",24079:"3a7c022b",24153:"a85450b6",24438:"f1ca4a2b",24608:"9c4d2d11",24825:"817ca35c",24938:"fdbcbb23",24958:"bde97521",25048:"87f69e44",25077:"39f84e06",25146:"0997c567",25246:"b5a11b9d",25356:"e744b233",25757:"47e735a8",26074:"b87d3368",26107:"b5fdfa9c",26234:"ce958100",26802:"c15ebab9",26866:"b52b946a",27307:"a5de26aa",27377:"10548e4b",27598:"8b29878d",27624:"5389827e",27657:"b399705d",27693:"b34f260b",27872:"7eb52ea2",27918:"d0a8d8ed",27991:"4ac447ee",28267:"922c9c35",28441:"ff1cda8f",28863:"e78497cb",29025:"2f051fb4",29231:"6939bb2d",29514:"c2b68c2b",29893:"6400f7b4",29898:"fabe3892",30010:"a251ece7",30021:"8103d2bc",30765:"7d6a817f",30870:"0a7ea360",31034:"657c9eb3",31105:"601dcbb2",31210:"993f173d",31460:"73b8518f",31495:"c91216ad",31503:"2c1b93ad",31508:"a3894107",31532:"8a4f7677",31680:"823a7231",31684:"b02c9aa6",32089:"2e3743ec",32118:"c4b5d617",32185:"e3b8eb14",32227:"4cbb576e",32236:"d74dfb6a",32367:"e00f9da9",32523:"4e711026",32751:"35cffd82",32887:"32d9e8e2",33367:"2e74a184",33492:"18242563",33560:"3d37821a",33590:"ac05d671",33763:"13ea4824",33893:"9c209ddd",34224:"b1754939",34353:"5df010f9",34610:"1714674b",34643:"dea6b928",34817:"f5960bf6",35032:"593d5472",35328:"bcd6e1e2",35420:"979421bb",35456:"e55e1167",35514:"dbdedade",35600:"350ac85a",35693:"9fe9997d",35707:"029a4644",35971:"6fafd2f6",36093:"0ac5190c",36129:"d4d41b5b",36298:"3a36bf4e",36565:"ff24bae5",36882:"adaced3e",37039:"fb0272bc",37259:"f4c9441f",37410:"7a21e8ee",38399:"282f4b75",38659:"83321961",38
758:"43634279",38814:"82effc26",38890:"3685fb76",38933:"2aef4719",39289:"b1fa1f56",39633:"00747b08",39792:"105fd077",39976:"df33ef04",40116:"940177fa",40206:"7600c823",40335:"62c09e15",40414:"02e194ae",40512:"894e2f8e",40561:"4b5a46ab",40705:"8f8b91e7",40968:"cb073fc8",40992:"9759ebdb",41099:"86c883cf",41115:"49dbda5f",41772:"396480ae",42140:"f4b0ef01",42150:"2e2273df",42479:"64d658ec",42553:"a62ebfeb",42632:"5305238a",43257:"776c3f9d",43397:"0a3344ae",43611:"19e99f2b",43956:"55f22f88",43976:"683d82c7",44008:"a5952380",44043:"dd23ea4d",44265:"c07ff1fb",44482:"ff4e5b9b",44544:"0cb89fa6",44801:"7bcab8c9",44849:"fd8a19d4",45037:"c1a9a579",45141:"416ea356",45290:"6e6bbb42",45327:"d9b995e6",45389:"2266523d",45589:"c6032b91",45602:"55d99a93",45661:"6d358b1a",45843:"3ce6215c",45907:"3a7096a4",46103:"c269c6ad",46402:"4bfe5322",46750:"2d2b65a4",46871:"6a0d45ec",46939:"af368ed5",46945:"4deecdf7",47025:"b2455dd2",47176:"d6a48d9b",47224:"8350e7bb",47371:"75ac95a0",47429:"fe4f0b48",47760:"9f080d13",47871:"775ce29b",47953:"ff7ca46e",48360:"704c48e5",48493:"a87b73be",48510:"9d33d44b",48610:"884dfaf4",48932:"0a2ccfb1",48934:"10f3a163",48951:"04e2ec14",48983:"99ada184",49073:"1b25eb30",49352:"43cad4ba",49526:"2e0b689f",49836:"cfaa7ef2",49933:"48463dd2",50683:"a872d20e",50765:"39abfd54",50801:"f8bc35d7",50947:"0cc3e088",50996:"79164d6e",51334:"f84c3fb8",51358:"b52f814a",51669:"781523b4",51969:"04895f4a",52057:"d414b0a4",52066:"13035838",52341:"73eb8e32",52535:"813fdab8",52791:"80206f7e",52825:"58568080",52989:"a3cd670c",53233:"861129ab",53249:"a92dedd1",53325:"8c55345d",53414:"2d023348",53544:"be4aee5c",53550:"38b2fc26",53608:"51fbf6e8",53674:"d66956ff",53981:"0501d788",54086:"fd0b8865",54118:"9d7dd33c",54185:"eedaaa83",54202:"8704e433",54351:"d2e4f02c",54827:"221219b1",55040:"d1cb509f",55212:"a98669a3",55604:"da90566d",55739:"c16587c9",55844:"8af1b4e3",55901:"ece48539",56054:"ea7982d5",56436:"126d99cf",56556:"843e3673",56592:"97df0ab9",56637:"c44dca55",56681:"bd27ea90",56733:"cbe4865f",57186:"24d8f4dc",57242:"c56ec24e",57520:"d7c702e7",57626:"b28cc499",57791:"3f2df52f",58283:"582213c5",58284:"02eb845f",58326:"b50a1ad3",58375:"299a811d",59273:"20dcc2e0",59289:"6bf5e12c",59310:"60d2b2a2",59591:"db1cce21",59656:"69668bd4",59732:"ebea58ca",59801:"80e44253",60002:"953a68a1",60045:"32fa5591",60109:"5e7dacc4",60195:"0977f346",60302:"ce44938f",60591:"c5eb60b7",60851:"e2d6a9ba",61285:"9125b074",61428:"5d865764",61551:"32bdbf62",61689:"f27a7a8d",61854:"f7f140d1",62039:"ab30f483",62111:"1cd62ed7",62121:"c0d66c5c",62226:"ce1b52d2",62316:"2e53f7c7",62394:"127504f9",62829:"e29b823a",63020:"2061029d",63058:"bffba846",63107:"2452e806",63454:"a5822721",63576:"55f24d1e",63818:"d881a858",64013:"75f40f00",64063:"9ba6cd49",64107:"bbac9179",64243:"64199d87",64416:"66cc4e72",64422:"7f37a95e",64519:"88bf6eb9",64599:"bee6e2cd",64873:"2869890f",64884:"10798e59",64985:"f6241921",65078:"7777c109",65112:"9205de00",65444:"8a6f349f",65467:"ece7bf27",65477:"9e2aac8b",65574:"a3fd16aa",65628:"c261c2b6",65655:"eb3568af",65842:"ec8d426c",65932:"ae8658cd",66170:"6cceac36",66226:"8047968b",66277:"531643ed",66562:"d1db0526",66627:"ccccff81",66778:"87cc0099",66954:"5c118f9f",67153:"2bd3f770",67285:"7d99c1c6",67301:"35652e99",67442:"77572a1a",67492:"7473edac",67493:"bb01ace5",67525:"81b0243d",67541:"498e1050",67664:"6e803a88",67910:"030fcd0a",67982:"90e28c3e",68045:"60b02929",68097:"064ef2b9",68217:"51ed944b",68398:"7aec6936",68539:"c61fff36",68616:"e453b2b9",68626:"f3bcaf98",68869:"aa78a90d",68952:"4b2959bc",69125:"bbaaa208",69437:"801cd286",6946
8:"04631c0e",69618:"93ac4698",69748:"92a03eee",70033:"c81dd0c8",70137:"368a5099",70144:"92d738b0",70525:"23197b34",70625:"7957c2c3",70710:"50baea1c",70714:"96cdf24d",70831:"99acb492",70956:"cd763dcb",71247:"8b4ea1ef",71843:"21cf01bf",72129:"c8ec0c77",72498:"5d7c0346",72975:"5680c01b",73028:"688cc111",73255:"42586d2b",73460:"5f5bb905",73464:"3900e296",73566:"e4f00ea4",73657:"9ec77816",74075:"4055cc8e",74095:"151ea8e0",74121:"89f11c39",74270:"0cfc0b2e",74436:"49293339",74549:"ac95ce6c",74692:"1eaaee6e",74783:"b8d70a26",74987:"8fa80799",75024:"74c9462d",75070:"6a69a2f3",75189:"53ebff88",75492:"449a8d40",75626:"b82ec7c1",76306:"58f424ba",76374:"e84b10de",77142:"58e97215",77258:"7d12cb50",77269:"bb2b45c0",77308:"b9bacdeb",77408:"6ab376ff",77518:"77ebdecc",77634:"0c20142e",77736:"640151b2",77999:"5a2db566",78029:"84c90162",78039:"87a215ee",78060:"9273d2d9",78250:"83166b8e",78273:"8fdb4e87",78504:"b7737141",78785:"1961bedc",78793:"459e3f97",79208:"9f4dc055",79311:"d55e217d",79807:"11748ff0",79915:"cfd9d0b4",80053:"cabe5c87",80077:"39adf45f",80203:"09ec72d0",80308:"426a8693",80484:"3e4b8dd7",80576:"02760f9d",80732:"f9e2201b",80957:"4fd62edd",81005:"fbb3df04",81377:"b564a3f3",81489:"20017eac",81714:"406f0762",81842:"de2dc423",81926:"5a6e4cc3",82060:"43ad112d",82241:"b48d5c7e",82545:"92663992",82815:"e6bb7000",83175:"65ab5364",83390:"bfb8ce7f",83440:"43e78a9e",83552:"ab1cd766",83782:"a73df2b7",83792:"f5d52c8a",83869:"c4848101",83890:"9f8a4ccb",84041:"87972b88",84128:"3f768f80",84217:"7e5fd77d",84621:"9ec906be",84633:"48525805",84849:"f38a38a5",84866:"5fd157bf",85050:"18d3c9cb",85115:"710d4a0c",85136:"8a0afea4",85232:"58fb1e51",85435:"15019197",85449:"31077f7f",85481:"aec9457a",85493:"29f86198",85591:"f933c583",85637:"a5671c75",85760:"f6e0a06a",86561:"c3c75307",86599:"7fbea23f",86646:"b4060782",86894:"d2081f40",87054:"e59e7145",87280:"de394ad9",87388:"93580f74",87456:"c4c4342f",87464:"cbddcd27",87682:"0c8a7c17",87709:"e08cb997",87754:"0d4a573e",87819:"9cda9b8b",88187:"33cb0fba",88433:"70b6fd4d",88532:"3a9112b8",88838:"ce6821fb",89459:"81b65887",89480:"79950ca4",89670:"8898bde3",89738:"79c0042d",89749:"dd663aed",89778:"85cc1d55",89801:"63b959ff",90064:"47ace020",90072:"e8a8f895",90125:"94b03ee0",90239:"58d3e98f",90250:"128dc57e",90371:"e4037c42",90533:"b8164802",90728:"471deafa",90814:"f59a7fa7",91360:"168ad11f",91375:"a293ad8a",91610:"9bace73b",91679:"9d380da4",91799:"255a9367",91951:"9047373d",91983:"a1d5ce3c",92074:"6b207a3b",92200:"2c247713",92273:"c616aef0",92290:"b3d7d750",92469:"89da1599",92518:"ac395260",92519:"aa4dfbd2",92706:"bb0be4f8",92715:"3e1cf124",92872:"fa5b18bf",93042:"a01f354a",93089:"1ea8fcbb",93119:"3c04575c",93171:"cfe43121",93264:"bddb9a40",93316:"99fd8323",93430:"d92b7d53",93570:"7e2183ff",93586:"2bdb92d3",93810:"92610bb8",93954:"dc9c9686",94073:"fe70bc9b",94507:"6d2364b5",94532:"e2dcc3c9",94551:"dfdd8a66",94629:"2eaf9893",94915:"4be70ab3",94976:"8f29526e",95159:"346a075c",95597:"0f252f81",95869:"060bac3c",95980:"e3a399ad",96170:"256bc801",96241:"47e8e610",96476:"57f1e942",96477:"36284a74",96542:"eee928eb",96647:"efbfbff6",96698:"1506ccce",96701:"aac9cc11",96736:"436b2353",97616:"431d1e7c",97915:"0b4f047d",97948:"96b083c6",98049:"95b7d2f3",98129:"dca84b90",98311:"675ef83e",98701:"6d5f5af6",98981:"c903e5f1",99615:"0c948941",99784:"fff93b62",99874:"fa81c65b",99923:"c6bbe583",99924:"5ffd65fa",99928:"fc84903d"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.92d87943.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return 
this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,a){return Object.prototype.hasOwnProperty.call(e,a)},f={},d="linkis-web-apache:",n.l=function(e,a,c,b){if(f[e])f[e].push(a);else{var t,r;if(void 0!==c)for(var o=document.getElementsByTagName("script"),i=0;i=d)&&Object.keys(n.O).every((function(e){return n.O[e](a[r])}))?a.splice(r--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[a,f,d]},n.n=function(e){var c=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(c,{a:c}),c},a=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},n.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);n.r(d);var b={};c=c||[null,a({}),a([]),a(a)];for(var t=2&f&&e;"object"==typeof t&&!~c.indexOf(t);t=a(t))Object.getOwnPropertyNames(t).forEach((function(c){b[c]=function(){return e[c]}}));return b.default=function(){return e},n.d(d,b),d},n.d=function(e,c){for(var a in c)n.o(c,a)&&!n.o(e,a)&&Object.defineProperty(e,a,{enumerable:!0,get:c[a]})},n.f={},n.e=function(e){return Promise.all(Object.keys(n.f).reduce((function(c,a){return n.f[a](e,c),c}),[]))},n.u=function(e){return"assets/js/"+({328:"4cc9882f",359:"2124c49e",639:"1ef83aab",1012:"b359ab7d",1226:"32c11423",1362:"6d204534",1402:"45047182",2170:"5526e2c8",2275:"f2e7bc47",2647:"d0685248",2698:"a534d5a4",2699:"6b4f6f6d",2794:"c976da7c",2814:"68f4675e",3009:"8e27a41e",3358:"ba75a7e1",3587:"45350984",3671:"6248a31d",4065:"217deffc",4280:"781d1b70",4380:"02f6a4b8",4443:"49ee9fc2",4452:"b047bf19",4484:"a854c309",4591:"77ddc047",4736:"7e21a02f",4984:"07edeecc",5140:"c3c3ee8a",5142:"d77f29dd",5233:"f2678917",5705:"166c3354",5909:"844135d6",6288:"e59b4707",6339:"e4bc1c20",6652:"78060cbc",6959:"322e6455",7015:"33f34b53",7029:"9b73e49d",7058:"d89de855",7342:"16b1aeb6",7479:"4c92610f",7586:"3e78e8ed",7766:"af1aaf24",8301:"a67041c8",8398:"6131eab8",8605:"5be510ab",8785:"127364d6",8890:"798fb933",9235:"e9ffd44c",9409:"15a0842e",9617:"cf38eb0d",9646:"4fdf3839",9782:"55dfda34",9828:"787028e7",9991:"d3b38238",10001:"8eb4e46b",10201:"57cd18ee",10218:"7cc7c4b1",10341:"a34d501f",10388:"b14f3fa2",10499:"e428c6d2",10527:"710884a6",10828:"5917547a",10967:"216ac574",11477:"b2f554cd",11539:"77745b3d",11657:"1a566584",11713:"a7023ddc",12072:"eee10519",12431:"5b37fdc8",12561:"2670bca3",12979:"2ee9677b",13050:"5f82aa37",13107:"cd7c5b9a",13134:"eba5f9c4",13143:"04bdf1ac",13244:"0f7894ab",13479:"8a12cfa4",13703:"2ea0638b",13751:"3720c009",13755:"54f9b777",13832:"07aed5a5",13948:"f32700a0",14065:"876124f9",14272:"50aee6de",14516:"abaaa1fe",14860:"e37a6402",14910:"51c20031",15287:"333c80e1",15450:"466720ab",15665:"908165ba",16274:"8625a1ce",16511:"83d17af4",16594:"7cc92f5c",16742:"4c05f83b",16884:"c2352a99",17167:"a184b6b2",17187:"b9f50d96",17234:"bc244d90",17284:"2cf4430d",17353:"3df00f5b",17542:"66d63bfc",17625:"9a3ec700",17951:"2d364229",18139:"43dc7314",18471:"6423b631",18674:"48d82b2e",18730:"d80dfec6",18855:"9968f92c",18907:"4f9fd1aa",19001:"41664cec",19096:"856315e7",19267:"b0f3eaa6",19468:"53baf039",19881:"70b31b37",20026:"21a12340",20490:"f16124a0",20522:"b7f5bbd5",20709:"6c7c2e71",20943:"9b480441",21065:"966e982b",21405:"948b0dab",21602:"68a93d86",22215:"c2471b2c",22636:"b96a8a04",23057:"3ab15d88",23851:"c9177f39",23946:"af6f9f26",24079:"87cf41e2",24153:"280df7e5",24438:"74337923",24825:"d28aee8d",24938:"ba1b8836",24958:"c38140d4",25048:"bf275373",25077:"dff35117",25146
:"cac1e9bc",25246:"4eb6e5ee",25356:"94b02a9f",25757:"8f152d3b",26074:"2b9753f8",26107:"c5c3ab65",26234:"9154a6bd",26802:"52690743",26866:"d4051e29",27307:"bef57165",27377:"6f6118a9",27598:"8837ae6a",27624:"678743b7",27657:"970236dc",27693:"8a0722c3",27872:"5771c448",27918:"17896441",27991:"dc1e40d7",28267:"7a3788d1",28441:"bd518af2",28863:"6e916c0f",29025:"91b65c41",29231:"0c159898",29514:"1be78505",29893:"9c983a1e",29898:"db672e8f",30010:"778574bb",30021:"805f29de",30765:"2afb85b6",30870:"b571d381",31034:"5e40d2f9",31105:"dee797b6",31210:"eee5032f",31460:"f464b99a",31495:"fb75c206",31503:"e4594a63",31508:"41f5a6d2",31532:"1f5d6a30",31680:"fa2f5847",31684:"09d8c3a4",32089:"4470087f",32118:"fb1218a9",32185:"51fa421a",32227:"53424860",32236:"d1513e70",32367:"c00b49ad",32523:"05b3e639",32751:"a7c1a0ec",32887:"eb1549e9",33367:"1d9261ac",33492:"1bca3249",33560:"818823b9",33590:"1431e9dc",33763:"fff7b6e8",33893:"8422caaa",34224:"37d4f123",34353:"6580ced9",34610:"eea5f367",34643:"65df3d35",34817:"f6773039",35032:"046172dc",35328:"ad76bf80",35420:"51d0de41",35456:"8e7d50a2",35514:"5ab197a5",35600:"2c3c2ea6",35693:"68d19d38",35707:"2e1d0e00",35971:"eae3663a",36093:"69bdd21e",36129:"ace962cc",36298:"3b500f01",36565:"f77a6ffd",36882:"76f084ae",37039:"2520d203",37259:"bf8a911c",37410:"20a79681",38399:"4d8c07c4",38659:"190c673d",38758:"311f287b",38814:"a7a0ecb6",38890:"a40db232",38933:"7aecf381",39289:"8137d071",39633:"a546ef4e",39792:"f7c1c183",39976:"0861ade5",40116:"cf5d68e3",40206:"cfdaf306",40335:"bc34ddf5",40414:"e35b48a1",40512:"5845ef18",40561:"ead3ade5",40705:"b54b617c",40968:"cd50e9d9",40992:"aabbbc7e",41099:"a63939e6",41115:"0c77509b",41772:"1137ff4c",42140:"966b40b3",42150:"d13c5bfb",42479:"d039dc3f",42553:"531ae155",42632:"1e6a2ef9",43257:"2f1aac5b",43397:"ed17fbb9",43611:"291bb016",43956:"5534efc2",43976:"17ca8484",44008:"4ea65622",44043:"85bf98de",44265:"0fba09c7",44482:"18dd72b8",44544:"1866e095",44801:"8c3e10eb",44849:"9874d022",45037:"23992941",45141:"9969e5f7",45290:"6bb68e89",45327:"26e75e35",45389:"99fb9804",45589:"80900647",45602:"76bc5640",45661:"ffa367d4",45843:"efcf4ea7",45907:"57023425",46103:"ccc49370",46402:"9bfad1fd",46750:"82c182bc",46871:"1e131061",46939:"a3ba5b60",47025:"29707690",47176:"4fc9a01a",47224:"48988e0b",47371:"73d417a5",47429:"aff75f73",47760:"d3830ad4",47871:"541d169a",47953:"b5168e69",48360:"08bd5166",48493:"6d268c49",48510:"0a85ff3c",48610:"6875c492",48932:"248e03f5",48934:"afbc56b2",48951:"1a083444",48983:"2497064c",49073:"d32b6b2b",49352:"c1b1e234",49526:"fcd50b8b",49836:"afad409e",49933:"2e786fdc",50683:"ba7181fd",50765:"42e87eeb",50801:"631037e5",50947:"e5e4671e",50996:"9db1f0be",51334:"95aa0c9c",51358:"dfd0736f",51669:"b8401e80",51969:"b5a5e0cb",52057:"ed9f1119",52066:"5ca5940e",52341:"bcf26692",52535:"814f3328",52791:"24188f33",52825:"5e082069",52989:"6a2e0576",53233:"a56c6b7a",53249:"cddd8399",53325:"1af30dc4",53414:"71662ff9",53544:"05f3e170",53550:"6e8a7928",53608:"9e4087bc",53674:"91a4177b",53981:"dbdb3f24",54086:"93ca4beb",54118:"04b1c040",54185:"c6dac06e",54202:"fd400683",54351:"b0c58f26",54827:"d02ee2fb",55212:"4e5616f0",55604:"9b55b2aa",55739:"7ffe8452",55844:"3c939a9c",55901:"437a7bc9",56054:"5f35a0de",56436:"345c38fd",56556:"cc321d97",56592:"55c09602",56637:"f430c6df",56681:"c7bda2e7",56733:"a5bc72c7",57186:"d98b6f22",57242:"56b8ac01",57520:"eebdc9c6",57626:"c0917cb8",57791:"a76d6c80",58283:"7c24e110",58284:"73f2c183",58326:"e15bcb33",58375:"38c0935c",59273:"4449d5f1",59289:"b49531e7",59310:"3cd7ddbe",59591:"3a53518b",59656:"1f29c771",59732:"
f14812ff",59801:"e950a7f9",60002:"9bbf01c8",60045:"48718d21",60109:"353f3947",60195:"7d7dfbbe",60302:"c125c302",60591:"f99625e8",60851:"6e38ab13",61285:"8661c2de",61428:"a39a9928",61551:"3195a7b0",61689:"dc79b1e9",61854:"adb71217",62039:"2e0f5cec",62111:"8c4b8e49",62121:"9a0fbc46",62226:"68da338b",62316:"bfea878c",62394:"9ca7809d",62829:"cfea7194",63020:"e0d4d0dd",63058:"3411059c",63107:"021a310b",63454:"dee130d7",63576:"dd16698b",63818:"8cec74d9",64013:"01a85c17",64063:"29b7f3be",64107:"cc1d4c18",64243:"88d31c16",64416:"dd194dbe",64422:"014a5837",64519:"8518a6d3",64599:"563ab102",64873:"e32089ea",64884:"2576ff29",64985:"3096f953",65078:"d182fb80",65112:"4ffbe17f",65444:"e39b4679",65467:"7beec960",65477:"554493c6",65574:"528e29f1",65628:"3b7a3f3e",65655:"bebc3ac6",65842:"399d48da",65932:"37daacb8",66170:"933c02a1",66226:"5fa70989",66277:"1984d11b",66562:"e73f859e",66627:"c16232bc",66778:"a1463431",66954:"d9cecb84",67153:"aefd1ce5",67285:"107b70ed",67301:"28b7232a",67442:"dac925f7",67492:"c34dd313",67493:"0e9a9e55",67525:"eb3832f3",67541:"7fdbf36e",67664:"a57f4178",67910:"4509e610",67982:"8a9e1376",68045:"a0fe705b",68097:"3ddf8900",68217:"ac5779b8",68398:"e9457a88",68539:"2f338473",68616:"4b35450a",68626:"41f3d1d4",68869:"5f5f4d9b",68952:"e323c1ba",69125:"70ee9ef1",69437:"5b5bbdd7",69468:"5c36283e",69618:"b3406135",69748:"6c4b5682",70033:"a2b6e306",70137:"9355e337",70144:"f67fe035",70525:"ba97a692",70625:"96e2cccf",70710:"25eabba4",70714:"5f92cd96",70831:"0aa128fb",70956:"8dd37400",71247:"5f7a42fa",71843:"dbca4a19",72129:"8532ad45",72498:"7ab3d102",72975:"408f120a",73028:"11295d65",73255:"fe07bdbe",73460:"089f961f",73464:"03021317",73566:"3dddbf8e",73657:"4f604ceb",74075:"fae86d7e",74095:"09b52532",74121:"55960ee5",74270:"3ec232d9",74436:"c875b05b",74549:"6c79c040",74692:"d93bf326",74783:"83bcd91d",74987:"769b7ddc",75024:"467cdcc7",75070:"3716aceb",75189:"d6b55977",75492:"240cbf48",75626:"af584b81",76306:"9f566abb",76374:"a831a863",77142:"f5c46a41",77258:"e98ff5dc",77269:"9508783d",77308:"dcbbe415",77408:"9f7b1adc",77518:"9e1c8ba3",77634:"32d2836b",77736:"3760967f",77999:"5657b1a5",78029:"77816f9e",78039:"9ac88ecc",78060:"360f41e6",78250:"b6f8819f",78273:"204b800a",78504:"c7719545",78785:"8d6cbe01",78793:"074f5eeb",79208:"02163d1c",79311:"d4bf935c",79807:"c4115680",79915:"d78d712a",80053:"935f2afb",80077:"e58ee7f4",80203:"ae644a35",80308:"441a7f95",80484:"5d613655",80576:"c2a9f04f",80732:"84e90c5f",80957:"1b338be2",81005:"fac6f2d4",81377:"c64e21de",81489:"27ca247f",81714:"d0342500",81842:"0ce26544",81926:"8f020eac",82060:"f5df6522",82241:"ead137ee",82545:"3d828cc6",82815:"9377a004",83175:"aa1e90ab",83390:"34759613",83440:"b1bf7260",83552:"e726b67f",83782:"261d0ea0",83792:"438501e2",83869:"28bf1441",83890:"28dfc6fb",84041:"ee6959ee",84128:"a09c2993",84217:"1d3c0678",84621:"34ac2676",84633:"e3bd683e",84849:"2c31ff43",84866:"22c54347",85050:"86f22513",85115:"44604fa9",85136:"8be741dd",85232:"d5927b70",85435:"0ad283e5",85449:"3fc514d2",85481:"c3ce6b05",85493:"cbc19f4b",85591:"3733e62b",85637:"96991cca",85760:"5e8c8a07",86561:"42c92bcd",86599:"117f37cd",86646:"33b6fdcc",86894:"2213fc24",87054:"9dd8a0d2",87280:"593ac3b1",87388:"0260d845",87456:"e29698a7",87464:"8aa67d88",87682:"b91032df",87709:"96c3c139",87754:"c0670030",87819:"802ad713",88187:"2c34c550",88433:"be8f9bda",88532:"755af260",88838:"9c38ddd2",89459:"4b002b59",89480:"79afda13",89670:"e4102989",89738:"c67d2a5e",89749:"8a8aa245",89778:"23b9c839",89801:"ce22cbd0",90064:"0b979966",90072:"c7ffffeb",90125:"fb16f602",90239:"25b6cbf3",90250:"36
ef0f87",90371:"5aff8b89",90533:"b2b675dd",90728:"8eef043b",90814:"cff6a186",91360:"394d5a7a",91375:"02daaa8d",91610:"9d3e2903",91679:"ac7622a2",91799:"caaa2886",91951:"78c9ae28",91983:"64ba6d0f",92074:"8903e609",92200:"9ad029fd",92273:"b2171041",92290:"ff2037b4",92469:"18c1b595",92518:"94e63a1a",92519:"a94c1f1c",92706:"eb60262c",92715:"f0aa3789",92872:"0b1ac180",93042:"6cf48756",93089:"a6aa9e1f",93119:"027c2617",93171:"890e518c",93264:"2cdd8fc8",93316:"bc1274a5",93430:"dac27efb",93570:"87153e45",93586:"35ca84ad",93810:"be9aa551",93954:"7d75cf68",94073:"05a474a1",94507:"555c312a",94532:"4bcdbd8b",94551:"311a1527",94629:"caa9028b",94915:"83688337",94976:"c2340238",95159:"553f28ff",95597:"b0207dc0",95869:"7d88342b",95980:"7043a272",96170:"72790c29",96241:"0bec58d7",96476:"4ff8b690",96477:"e88d5fb1",96542:"cada9e63",96647:"4af5dc2e",96698:"e63e6ab4",96701:"72b06b07",96736:"659d5cde",97616:"306a8c6c",97915:"90e4ca75",97948:"8ddb8ae8",98049:"ef6c6ab7",98129:"38e24728",98311:"72df85c1",98701:"f25316fd",98981:"bab44dbb",99615:"d3701aa3",99784:"30778cf0",99874:"07a5f688",99923:"74e0d570",99924:"df203c0f",99928:"43220a19"}[e]||e)+"."+{328:"2474f8e0",359:"f7b4ac85",639:"638b6a46",1012:"70f1d962",1226:"ed34399c",1362:"ab7e1ec5",1402:"5bc6edcc",2170:"ebeebe3c",2275:"6d1db94e",2647:"442da986",2698:"0730c0af",2699:"65721931",2794:"c1a68b30",2814:"a92cdaeb",3009:"21284465",3358:"df766fe2",3587:"e046259c",3671:"14e36a4d",3829:"2a47bdd2",4065:"79e088d3",4280:"2100fb18",4380:"358cf740",4443:"f54a719f",4452:"7f0d1de7",4484:"36e9e778",4591:"06d9fd0b",4736:"95203970",4984:"8df42d59",5140:"9541a33c",5142:"691faa50",5233:"ea86fea6",5705:"79ff171c",5909:"92d7ee77",6288:"1e0bf6ea",6339:"71687c00",6652:"07533954",6959:"00ccd71f",7015:"05fff3ee",7029:"8812a8be",7058:"1bffe547",7342:"ce97d43f",7479:"f5f691b4",7586:"052eb7c6",7766:"728fc0bc",8301:"59a8c6bf",8398:"e309a3c2",8605:"bfa0c905",8785:"fa277bf8",8890:"1932c262",9235:"e318bcd5",9409:"17740e18",9617:"56b2b95d",9646:"f273a2d5",9782:"03300db1",9828:"02da4631",9991:"5aeac15e",10001:"5c87cb43",10201:"f27835d2",10218:"d01b2999",10341:"9bb4f8b8",10388:"eeb9709a",10499:"5a924c44",10527:"0f90f48a",10828:"a4553d0d",10967:"bdc81e1e",10972:"bfd6544a",11477:"33c87ff5",11539:"92360ac1",11657:"a6240e17",11713:"cd9cef4e",12072:"2b42fa6d",12431:"df0fd6b1",12561:"acba7467",12979:"eddb07fb",13050:"1ce768ca",13107:"82ea8621",13134:"76625832",13143:"c388d623",13244:"71f6240e",13479:"45266115",13703:"e6202db6",13751:"1f2d2496",13755:"1a67012a",13832:"742c41ef",13948:"183ab949",14065:"fc08bea3",14272:"45e9070e",14516:"70bdbe7e",14860:"e0f98972",14910:"4d7f4085",15287:"5889feb5",15450:"39242097",15665:"a614478f",16274:"45d17c34",16511:"2a3bafe5",16594:"73515851",16742:"59b8b60f",16884:"3cab5ebc",17167:"3ff11a94",17187:"ecd6e604",17234:"2fc0afee",17284:"d2fdc847",17353:"3d294842",17542:"fb94b0ca",17625:"125374d0",17951:"1e958367",18139:"695ac6ad",18471:"595921ec",18674:"1855f473",18730:"3d5c8ba9",18855:"cd86d191",18907:"ae33969e",19001:"7281a5cc",19096:"739e7244",19267:"f7eb1c5e",19468:"1b41c252",19881:"a9c9b079",20026:"76eee45c",20490:"4f98e0f2",20522:"07b86566",20709:"002bb2a0",20943:"425707be",21065:"a41e193b",21405:"8cad1850",21602:"61750831",22215:"a6a37ed8",22636:"90bd7d8c",23057:"1ad7eda9",23851:"aaf0b50e",23946:"ad78fa4c",24079:"3a7c022b",24153:"a85450b6",24438:"f1ca4a2b",24608:"9c4d2d11",24825:"817ca35c",24938:"fdbcbb23",24958:"bde97521",25048:"87f69e44",25077:"39f84e06",25146:"0997c567",25246:"b5a11b9d",25356:"e744b233",25757:"47e735a8",26074:"b87d3368",26107:"b5fdfa9c",26234:
"ce958100",26802:"c15ebab9",26866:"b52b946a",27307:"a5de26aa",27377:"10548e4b",27598:"8b29878d",27624:"5389827e",27657:"b399705d",27693:"b34f260b",27872:"7eb52ea2",27918:"d0a8d8ed",27991:"4ac447ee",28267:"922c9c35",28441:"ff1cda8f",28863:"e78497cb",29025:"2f051fb4",29231:"6939bb2d",29514:"c2b68c2b",29893:"6400f7b4",29898:"fabe3892",30010:"a251ece7",30021:"8103d2bc",30765:"7d6a817f",30870:"0a7ea360",31034:"657c9eb3",31105:"601dcbb2",31210:"993f173d",31460:"73b8518f",31495:"c91216ad",31503:"2c1b93ad",31508:"a3894107",31532:"8a4f7677",31680:"823a7231",31684:"b02c9aa6",32089:"2e3743ec",32118:"c4b5d617",32185:"e3b8eb14",32227:"4cbb576e",32236:"d74dfb6a",32367:"e00f9da9",32523:"4e711026",32751:"35cffd82",32887:"32d9e8e2",33367:"2e74a184",33492:"18242563",33560:"3d37821a",33590:"ac05d671",33763:"13ea4824",33893:"9c209ddd",34224:"b1754939",34353:"5df010f9",34610:"1714674b",34643:"0e132c20",34817:"f5960bf6",35032:"593d5472",35328:"bcd6e1e2",35420:"979421bb",35456:"e55e1167",35514:"dbdedade",35600:"350ac85a",35693:"9fe9997d",35707:"029a4644",35971:"6fafd2f6",36093:"0ac5190c",36129:"d4d41b5b",36298:"3a36bf4e",36565:"ff24bae5",36882:"adaced3e",37039:"fb0272bc",37259:"f4c9441f",37410:"7a21e8ee",38399:"282f4b75",38659:"83321961",38758:"43634279",38814:"82effc26",38890:"3685fb76",38933:"2aef4719",39289:"b1fa1f56",39633:"00747b08",39792:"105fd077",39976:"df33ef04",40116:"940177fa",40206:"7600c823",40335:"62c09e15",40414:"02e194ae",40512:"894e2f8e",40561:"4b5a46ab",40705:"8f8b91e7",40968:"cb073fc8",40992:"9759ebdb",41099:"86c883cf",41115:"49dbda5f",41772:"396480ae",42140:"f4b0ef01",42150:"2e2273df",42479:"64d658ec",42553:"a62ebfeb",42632:"5305238a",43257:"776c3f9d",43397:"0a3344ae",43611:"19e99f2b",43956:"55f22f88",43976:"683d82c7",44008:"a5952380",44043:"dd23ea4d",44265:"c07ff1fb",44482:"ff4e5b9b",44544:"0cb89fa6",44801:"7bcab8c9",44849:"fd8a19d4",45037:"c1a9a579",45141:"416ea356",45290:"6e6bbb42",45327:"d9b995e6",45389:"2266523d",45589:"c6032b91",45602:"55d99a93",45661:"6d358b1a",45843:"3ce6215c",45907:"3a7096a4",46103:"c269c6ad",46402:"4bfe5322",46750:"4d6fec64",46871:"6a0d45ec",46939:"af368ed5",46945:"4deecdf7",47025:"b2455dd2",47176:"d6a48d9b",47224:"8350e7bb",47371:"75ac95a0",47429:"fe4f0b48",47760:"9f080d13",47871:"775ce29b",47953:"ff7ca46e",48360:"8f04ad0b",48493:"a87b73be",48510:"9d33d44b",48610:"884dfaf4",48932:"0a2ccfb1",48934:"10f3a163",48951:"04e2ec14",48983:"99ada184",49073:"1b25eb30",49352:"43cad4ba",49526:"2e0b689f",49836:"cfaa7ef2",49933:"48463dd2",50683:"a872d20e",50765:"39abfd54",50801:"f8bc35d7",50947:"0cc3e088",50996:"79164d6e",51334:"f84c3fb8",51358:"b52f814a",51669:"781523b4",51969:"04895f4a",52057:"d414b0a4",52066:"43c8f57b",52341:"73eb8e32",52535:"813fdab8",52791:"80206f7e",52825:"58568080",52989:"a3cd670c",53233:"861129ab",53249:"a92dedd1",53325:"8c55345d",53414:"2d023348",53544:"be4aee5c",53550:"38b2fc26",53608:"51fbf6e8",53674:"d66956ff",53981:"0501d788",54086:"fd0b8865",54118:"9d7dd33c",54185:"eedaaa83",54202:"8704e433",54351:"d2e4f02c",54827:"221219b1",55040:"d1cb509f",55212:"a98669a3",55604:"da90566d",55739:"c16587c9",55844:"8af1b4e3",55901:"ece48539",56054:"ea7982d5",56436:"126d99cf",56556:"843e3673",56592:"97df0ab9",56637:"c44dca55",56681:"bd27ea90",56733:"cbe4865f",57186:"24d8f4dc",57242:"c56ec24e",57520:"d7c702e7",57626:"b28cc499",57791:"3f2df52f",58283:"582213c5",58284:"02eb845f",58326:"b50a1ad3",58375:"299a811d",59273:"20dcc2e0",59289:"6bf5e12c",59310:"60d2b2a2",59591:"db1cce21",59656:"69668bd4",59732:"ebea58ca",59801:"80e44253",60002:"953a68a1",60045:"32fa5591",60109:"5
e7dacc4",60195:"0977f346",60302:"ce44938f",60591:"c5eb60b7",60851:"e2d6a9ba",61285:"9125b074",61428:"5d865764",61551:"32bdbf62",61689:"f27a7a8d",61854:"f7f140d1",62039:"ab30f483",62111:"1cd62ed7",62121:"c0d66c5c",62226:"ce1b52d2",62316:"2e53f7c7",62394:"127504f9",62829:"e29b823a",63020:"2061029d",63058:"bffba846",63107:"2452e806",63454:"a5822721",63576:"55f24d1e",63818:"d881a858",64013:"75f40f00",64063:"9ba6cd49",64107:"bbac9179",64243:"64199d87",64416:"66cc4e72",64422:"7f37a95e",64519:"88bf6eb9",64599:"bee6e2cd",64873:"2869890f",64884:"10798e59",64985:"f6241921",65078:"7777c109",65112:"9205de00",65444:"8a6f349f",65467:"ece7bf27",65477:"9e2aac8b",65574:"a3fd16aa",65628:"c261c2b6",65655:"eb3568af",65842:"ec8d426c",65932:"ae8658cd",66170:"6cceac36",66226:"8047968b",66277:"531643ed",66562:"d1db0526",66627:"ccccff81",66778:"87cc0099",66954:"5c118f9f",67153:"2bd3f770",67285:"7d99c1c6",67301:"35652e99",67442:"77572a1a",67492:"7473edac",67493:"bb01ace5",67525:"81b0243d",67541:"498e1050",67664:"6e803a88",67910:"030fcd0a",67982:"90e28c3e",68045:"60b02929",68097:"064ef2b9",68217:"51ed944b",68398:"7aec6936",68539:"c61fff36",68616:"e453b2b9",68626:"f3bcaf98",68869:"aa78a90d",68952:"4b2959bc",69125:"bbaaa208",69437:"801cd286",69468:"04631c0e",69618:"93ac4698",69748:"92a03eee",70033:"c81dd0c8",70137:"368a5099",70144:"92d738b0",70525:"23197b34",70625:"7957c2c3",70710:"50baea1c",70714:"96cdf24d",70831:"99acb492",70956:"cd763dcb",71247:"8b4ea1ef",71843:"21cf01bf",72129:"c8ec0c77",72498:"5d7c0346",72975:"5680c01b",73028:"688cc111",73255:"42586d2b",73460:"5f5bb905",73464:"3900e296",73566:"e4f00ea4",73657:"9ec77816",74075:"4055cc8e",74095:"151ea8e0",74121:"89f11c39",74270:"0cfc0b2e",74436:"49293339",74549:"ac95ce6c",74692:"1eaaee6e",74783:"b8d70a26",74987:"8fa80799",75024:"74c9462d",75070:"6a69a2f3",75189:"53ebff88",75492:"449a8d40",75626:"b82ec7c1",76306:"58f424ba",76374:"e84b10de",77142:"58e97215",77258:"7d12cb50",77269:"bb2b45c0",77308:"b9bacdeb",77408:"6ab376ff",77518:"77ebdecc",77634:"0c20142e",77736:"640151b2",77999:"5a2db566",78029:"84c90162",78039:"87a215ee",78060:"9273d2d9",78250:"83166b8e",78273:"8fdb4e87",78504:"b7737141",78785:"1961bedc",78793:"459e3f97",79208:"9f4dc055",79311:"d55e217d",79807:"11748ff0",79915:"cfd9d0b4",80053:"23dd1aac",80077:"39adf45f",80203:"09ec72d0",80308:"426a8693",80484:"3e4b8dd7",80576:"02760f9d",80732:"f9e2201b",80957:"4fd62edd",81005:"fbb3df04",81377:"b564a3f3",81489:"20017eac",81714:"406f0762",81842:"de2dc423",81926:"5a6e4cc3",82060:"43ad112d",82241:"b48d5c7e",82545:"92663992",82815:"e6bb7000",83175:"65ab5364",83390:"bfb8ce7f",83440:"43e78a9e",83552:"ab1cd766",83782:"a73df2b7",83792:"f5d52c8a",83869:"c4848101",83890:"9f8a4ccb",84041:"87972b88",84128:"3f768f80",84217:"7e5fd77d",84621:"9ec906be",84633:"48525805",84849:"f38a38a5",84866:"5fd157bf",85050:"18d3c9cb",85115:"710d4a0c",85136:"8a0afea4",85232:"58fb1e51",85435:"15019197",85449:"31077f7f",85481:"aec9457a",85493:"29f86198",85591:"f933c583",85637:"a5671c75",85760:"f6e0a06a",86561:"c3c75307",86599:"7fbea23f",86646:"b4060782",86894:"d2081f40",87054:"d5b54caf",87280:"de394ad9",87388:"93580f74",87456:"c4c4342f",87464:"cbddcd27",87682:"0c8a7c17",87709:"e08cb997",87754:"0d4a573e",87819:"9cda9b8b",88187:"33cb0fba",88433:"70b6fd4d",88532:"3a9112b8",88838:"ce6821fb",89459:"81b65887",89480:"79950ca4",89670:"8898bde3",89738:"79c0042d",89749:"dd663aed",89778:"85cc1d55",89801:"63b959ff",90064:"47ace020",90072:"e8a8f895",90125:"94b03ee0",90239:"012976b7",90250:"128dc57e",90371:"e4037c42",90533:"b8164802",90728:"471deafa",90814:"f59
a7fa7",91360:"168ad11f",91375:"a293ad8a",91610:"9bace73b",91679:"9d380da4",91799:"255a9367",91951:"9047373d",91983:"a1d5ce3c",92074:"6b207a3b",92200:"2c247713",92273:"c616aef0",92290:"b3d7d750",92469:"89da1599",92518:"ac395260",92519:"aa4dfbd2",92706:"bb0be4f8",92715:"3e1cf124",92872:"fa5b18bf",93042:"a01f354a",93089:"1ea8fcbb",93119:"3c04575c",93171:"cfe43121",93264:"bddb9a40",93316:"99fd8323",93430:"d92b7d53",93570:"7e2183ff",93586:"2bdb92d3",93810:"92610bb8",93954:"dc9c9686",94073:"fe70bc9b",94507:"6d2364b5",94532:"e2dcc3c9",94551:"dfdd8a66",94629:"2eaf9893",94915:"4be70ab3",94976:"8f29526e",95159:"346a075c",95597:"0f252f81",95869:"060bac3c",95980:"e3a399ad",96170:"256bc801",96241:"47e8e610",96476:"57f1e942",96477:"36284a74",96542:"eee928eb",96647:"efbfbff6",96698:"1506ccce",96701:"aac9cc11",96736:"436b2353",97616:"431d1e7c",97915:"0b4f047d",97948:"96b083c6",98049:"95b7d2f3",98129:"dca84b90",98311:"675ef83e",98701:"6d5f5af6",98981:"c903e5f1",99615:"0c948941",99784:"fff93b62",99874:"fa81c65b",99923:"c6bbe583",99924:"5ffd65fa",99928:"fc84903d"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.92d87943.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,c){return Object.prototype.hasOwnProperty.call(e,c)},f={},d="linkis-web-apache:",n.l=function(e,c,a,b){if(f[e])f[e].push(c);else{var t,r;if(void 0!==a)for(var o=document.getElementsByTagName("script"),i=0;i Apache Linkis - + @@ -15,7 +15,7 @@
Linkis Blessing Wall
We hope WeDataSphere keeps getting better and better, and that we can work together toward national digital transformation.
We hope the WeDataSphere community can contribute more open-source projects in the data platform field, truly helping you reduce the adoption cost of big data technology and improve the efficiency of enterprise big data use.
I wish WeDataSphere ever greater success, with more of its projects becoming top projects of Apache. Thanks to Shuai, Qiang, Hua, You, Ping, and the other community leaders for their strong support and help with product use and secondary development.
We hope WeDataSphere becomes more and more robust, and the community more and more active.
I have been working with WeBank's DSS for more than half a year. DSS's excellent framework design, combined with the Linkis computing middleware, greatly simplifies the development of upper-layer applications. With the help of DSS, our team has quickly built several data console modules. I hope the DSS community keeps getting better, with more developers contributing code, so that Linkis can support more underlying engines and enrich the third-party application components of the DSS ecosystem.
DSS has done a great thing: it makes the road to big data no longer hard, puts "everyone can be an analyst" within reach, and keeps one-stop development, analysis, and operations from being split apart. A tribute to DSS.
We strongly agree with WeDataSphere's one-stop concept: it shields the complexity of the underlying open-source components well, greatly lowers the barrier to big data development, and lets users focus on their own business implementation to maximize the value of data. Although a true out-of-the-box experience has not yet been reached, given the complexity of the technology and the diversity of the components, that is the direction of our efforts; as more users and experts join the project and keep polishing the product, it will become ever more intelligent and user-friendly, and the future looks promising. In addition, as a developer, I am honored to take part in developing the WeDataSphere Linkis component, and I hope to contribute more to the community.
- + \ No newline at end of file diff --git a/blog/2022/02/08/how-to-user-blog/index.html b/blog/2022/02/08/how-to-user-blog/index.html index 127bb3c181b..a831cbfa297 100644 --- a/blog/2022/02/08/how-to-user-blog/index.html +++ b/blog/2022/02/08/how-to-user-blog/index.html @@ -7,7 +7,7 @@ How to Write a Blog | Apache Linkis - + @@ -28,7 +28,7 @@ blog/authors.yml

Casion:
  name: Casion
  title: Development Engineer of WeBank
  url: https://github.com/casionone/
  image_url: https://avatars.githubusercontent.com/u/7869972?v=4
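For reference, a blog post then selects this author by its key in the post front matter. A minimal hypothetical example (the title and tags here are illustrative, taken from the post and tags visible elsewhere on this site, not from this snippet):

---
title: How to Write a Blog
authors: [Casion]
tags: [blog, guide]
---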
- + \ No newline at end of file diff --git a/blog/2022/02/21/linkis-deploy/index.html b/blog/2022/02/21/linkis-deploy/index.html index 488a38dc531..b5e436247b1 100644 --- a/blog/2022/02/21/linkis-deploy/index.html +++ b/blog/2022/02/21/linkis-deploy/index.html @@ -7,7 +7,7 @@ Linkis Deployment Troubleshooting | Apache Linkis - + @@ -74,7 +74,7 @@ search

7. How to obtain relevant information

The Linkis official website documentation is constantly improving; you can browse or keyword-search the relevant documents on this site.

Related blog post links

- + \ No newline at end of file diff --git a/blog/2022/03/20/openlookeng/index.html b/blog/2022/03/20/openlookeng/index.html index 115d7d0ad1f..58cdb4bf1c9 100644 --- a/blog/2022/03/20/openlookeng/index.html +++ b/blog/2022/03/20/openlookeng/index.html @@ -7,7 +7,7 @@ Implementation of OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ image

The combination of Linkis and OpenLooKeng provides the following capabilities:

    1. The connection capability of the Linkis computing middleware layer lets upper-layer application tools quickly connect to OpenLooKeng, submit tasks, and obtain logs, progress, and results (see the request sketch after this list).
    2. The Linkis public services can perform custom variable substitution, UDF management, and more for OpenLooKeng SQL.
    3. The Linkis context capability allows OpenLooKeng results to be passed to downstream ECs such as Spark and Hive for further querying.
    4. Linkis-based resource management and multi-tenancy can isolate OpenLooKeng resource usage between tasks and tenants.
    5. OpenLooKeng's connector capability lets upper-layer application tools submit cross-source heterogeneous, cross-domain, and cross-DC queries and get results back within seconds.
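As a rough sketch of point 1, submitting an OpenLooKeng statement through the Linkis entrance REST interface might look as follows; the host, port, token headers, engine version, and label values are assumptions for illustration, not taken from this post:

# illustrative host/port; a successful call returns a taskID/execID
# that can then be used to poll logs, progress, and results
curl -s -X POST "http://127.0.0.1:9001/api/rest_j/v1/entrance/submit" \
  -H "Content-Type: application/json" \
  -H "Token-Code: LINKIS-AUTH-TOKEN" -H "Token-User: hadoop" \
  -d '{
        "executionContent": {"code": "SELECT 1", "runType": "sql"},
        "params": {"variable": {}, "configuration": {}},
        "labels": {"engineType": "openlookeng-1.5.0", "userCreator": "hadoop-IDE"}
      }'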

Follow-up plans

In the future, the two communities will continue to cooperate and plan to launch the following functions:

    1. Linkis supports OpenLooKeng on YARN mode.
    2. Linkis completes resource management and control for OpenLooKeng, so that tasks can be queued by Linkis and submitted only when resources are sufficient.
    3. Building on OpenLooKeng's mixed-computation capability, optimize the Linkis Orchestrator in subsequent plans to support mixed computation across ECs.
- + \ No newline at end of file diff --git a/blog/2022/04/15/how-to-download-engineconn-plugin/index.html b/blog/2022/04/15/how-to-download-engineconn-plugin/index.html index bd3a75d37c4..fbc619d327b 100644 --- a/blog/2022/04/15/how-to-download-engineconn-plugin/index.html +++ b/blog/2022/04/15/how-to-download-engineconn-plugin/index.html @@ -7,7 +7,7 @@ How to Download Engine Plugins Not Included in the Installation Package By Default | Apache Linkis - + @@ -18,7 +18,7 @@

Copy the engine material package to be used into the Linkis engine plugin directory, and then refresh the engine material (see the sketch below).

For the detailed process, refer to Installing the EngineConnPlugin engine.
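A minimal sketch of the two steps, assuming a standard single-node install layout; the paths, host, port, and cookie value are assumptions, not taken from this post:

# 1. copy the engine material package into the Linkis engine plugin directory
cp -r openlookeng ${LINKIS_HOME}/lib/linkis-engineconn-plugins/
# 2. refresh the engine material so the new plugin is picked up
curl --cookie "bdp-user-ticket-id={ticket}" \
  "http://127.0.0.1:9001/api/rest_j/v1/engineplugin/refreshAll"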

- + \ No newline at end of file diff --git a/blog/2022/06/09/meetup-content-review/index.html index 5a8639ae06e..5af125eff6a 100644 --- a/blog/2022/06/09/meetup-content-review/index.html +++ b/blog/2022/06/09/meetup-content-review/index.html @@ -7,7 +7,7 @@ Apache Linkis (Incubating) Meet Up | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/blog/2022/07/04/how-to-add-auto-bot/index.html b/blog/2022/07/04/how-to-add-auto-bot/index.html index 3ba91d06465..054186ae6e0 100644 --- a/blog/2022/07/04/how-to-add-auto-bot/index.html +++ b/blog/2022/07/04/how-to-add-auto-bot/index.html @@ -7,7 +7,7 @@ How to add a GitHub Action for the GitHub repository | Apache Linkis - + @@ -32,7 +32,7 @@ - name: Close Issue uses: peter-evans/close-issue@v2 if: ${{ github.event.pull_request.merged }} with: issue-number: ${{ steps.Closer.outputs.issueNumber }} comment: The associated PR has been merged, this issue is automatically closed, you can reopend if necessary. env: Github_Token: ${{ secrets.GITHUB_TOKEN }} PRNUM: ${{ github.event.pull_request.number }}
- + \ No newline at end of file diff --git a/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html b/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html index 3a2e7a4b043..59d7de93ce2 100644 --- a/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html +++ b/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html @@ -7,7 +7,7 @@ deploy linkis with kubernetes | Apache Linkis - + @@ -48,7 +48,7 @@ kubectl config view  kubectl config get-contexts  kubectl cluster-info  
- + \ No newline at end of file diff --git a/blog/archive/index.html b/blog/archive/index.html index 8c66da7be54..3c494b01846 100644 --- a/blog/archive/index.html +++ b/blog/archive/index.html @@ -7,7 +7,7 @@ Archive | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/blog/index.html b/blog/index.html index 623a883df91..7eda3bdf639 100644 --- a/blog/index.html +++ b/blog/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -72,7 +72,7 @@ image

The capabilities based on Linkie and OpenLooKeng can provide the following capabilities:

    1. The connection capability of the computing middleware layer based on Linkis allows upper-layer application tools to quickly connect to OpenLooKeng, submit tasks, and obtain logs, progress, and results.
    1. Based on the public service capability of Linkis, it can complete custom variable substitution, UDF management, etc. for OpenLooKeng's sql
    1. Based on the context capability of Linkis, the results of OpengLooKeng can be passed to downstream ECs such as Spark and Hive for query
    1. Linkis-based resource management and multi-tenancy capabilities can isolate tasks from tenants and use OpenLooKeng resources
    1. Based on OpengLooKeng's connector capability, the upper-layer application tool can complete the task of submitting cross-source heterogeneous query, cross-domain and cross-DC query type, and get a second-level return.

Follow-up plans#

In the future, the two communities will continue to cooperate and plan to launch the following functions:

  • 1.Linkis supports OpenLooKeng on Yarn mode
    1. Linkis has completed the resource management and control of OpenLooKeng, tasks can now be queued by Linkis, and submitted only when resources are sufficient
    1. Based on the mixed computing ability of OpenLooKeng, the ability of Linkis Orchestrator is optimized to complete the mixed computing ability between ECs in the subsequent plan.
- + \ No newline at end of file diff --git a/blog/page/2/index.html b/blog/page/2/index.html index 6413f905688..22318b43ed8 100644 --- a/blog/page/2/index.html +++ b/blog/page/2/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -74,7 +74,7 @@ search

7. How to obtain relevant information#

Linkis official website documents are constantly improving, you can view/keyword search related documents on this official website.

Related blog post links

· 4 min read
Casion

This article mainly guides you how to publish blog posts on the Linkis official website. You are welcome to submit blog post documents about Apache Linkis, including but not limited to Linkis installation/source code analysis/architecture/experience sharing.

This article mainly refers to Docusaurus' official [blog post specifications and examples] (https://docusaurus.io/zh-CN/blog). The guidelines and specifications may not be perfect. Any comments or suggestions are welcome.

- + \ No newline at end of file diff --git a/blog/tags/blog/index.html b/blog/tags/blog/index.html index bfee7dac2ad..c7c8dc0cea1 100644 --- a/blog/tags/blog/index.html +++ b/blog/tags/blog/index.html @@ -7,7 +7,7 @@ One post tagged with "blog" | Apache Linkis - + @@ -15,7 +15,7 @@

One post tagged with "blog"

View All Tags

· 4 min read
Casion

This article mainly guides you how to publish blog posts on the Linkis official website. You are welcome to submit blog post documents about Apache Linkis, including but not limited to Linkis installation/source code analysis/architecture/experience sharing.

This article mainly refers to Docusaurus' official [blog post specifications and examples] (https://docusaurus.io/zh-CN/blog). The guidelines and specifications may not be perfect. Any comments or suggestions are welcome.

- + \ No newline at end of file diff --git a/blog/tags/engine/index.html b/blog/tags/engine/index.html index 7adb13cff87..69f203b51af 100644 --- a/blog/tags/engine/index.html +++ b/blog/tags/engine/index.html @@ -7,7 +7,7 @@ 2 posts tagged with "engine" | Apache Linkis - + @@ -22,7 +22,7 @@ image

The capabilities based on Linkie and OpenLooKeng can provide the following capabilities:

    1. The connection capability of the computing middleware layer based on Linkis allows upper-layer application tools to quickly connect to OpenLooKeng, submit tasks, and obtain logs, progress, and results.
    1. Based on the public service capability of Linkis, it can complete custom variable substitution, UDF management, etc. for OpenLooKeng's sql
    1. Based on the context capability of Linkis, the results of OpengLooKeng can be passed to downstream ECs such as Spark and Hive for query
    1. Linkis-based resource management and multi-tenancy capabilities can isolate tasks from tenants and use OpenLooKeng resources
    1. Based on OpengLooKeng's connector capability, the upper-layer application tool can complete the task of submitting cross-source heterogeneous query, cross-domain and cross-DC query type, and get a second-level return.

Follow-up plans#

In the future, the two communities will continue to cooperate and plan to launch the following functions:

  • 1.Linkis supports OpenLooKeng on Yarn mode
    1. Linkis has completed the resource management and control of OpenLooKeng, tasks can now be queued by Linkis, and submitted only when resources are sufficient
    1. Based on the mixed computing ability of OpenLooKeng, the ability of Linkis Orchestrator is optimized to complete the mixed computing ability between ECs in the subsequent plan.
- + \ No newline at end of file diff --git a/blog/tags/github/index.html b/blog/tags/github/index.html index 6e7f99b51e7..dd6d27a0225 100644 --- a/blog/tags/github/index.html +++ b/blog/tags/github/index.html @@ -7,7 +7,7 @@ 2 posts tagged with "github" | Apache Linkis - + @@ -65,7 +65,7 @@ - name: Close Issue uses: peter-evans/close-issue@v2 if: ${{ github.event.pull_request.merged }} with: issue-number: ${{ steps.Closer.outputs.issueNumber }} comment: The associated PR has been merged, this issue is automatically closed, you can reopend if necessary. env: Github_Token: ${{ secrets.GITHUB_TOKEN }} PRNUM: ${{ github.event.pull_request.number }}
- + \ No newline at end of file diff --git a/blog/tags/guide/index.html b/blog/tags/guide/index.html index 7ed6c328cf3..99a77eae1cc 100644 --- a/blog/tags/guide/index.html +++ b/blog/tags/guide/index.html @@ -7,7 +7,7 @@ 2 posts tagged with "guide" | Apache Linkis - + @@ -18,7 +18,7 @@

Copy the engine material package to be used to the engine plug-in directory of linkis, and then refresh the engine material.

Detailed process referenceInstalling EngineConnPlugin engine.

· 4 min read
Casion

This article mainly guides you how to publish blog posts on the Linkis official website. You are welcome to submit blog post documents about Apache Linkis, including but not limited to Linkis installation/source code analysis/architecture/experience sharing.

This article mainly refers to Docusaurus' official [blog post specifications and examples] (https://docusaurus.io/zh-CN/blog). The guidelines and specifications may not be perfect. Any comments or suggestions are welcome.

- + \ No newline at end of file diff --git a/blog/tags/index.html b/blog/tags/index.html index 26f83475b38..9415739e7ca 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/blog/tags/meetup/index.html b/blog/tags/meetup/index.html index 23954dff7ce..5598f57ca7a 100644 --- a/blog/tags/meetup/index.html +++ b/blog/tags/meetup/index.html @@ -7,7 +7,7 @@ One post tagged with "meetup" | Apache Linkis - + @@ -15,7 +15,7 @@
- + \ No newline at end of file diff --git a/community/apache-product-name-usage-guide/index.html b/community/apache-product-name-usage-guide/index.html index f3f6650f802..e2d1e428feb 100644 --- a/community/apache-product-name-usage-guide/index.html +++ b/community/apache-product-name-usage-guide/index.html @@ -7,7 +7,7 @@ Apache Product Name Usage Guide | Apache Linkis - + @@ -15,7 +15,7 @@

Apache Product Name Usage Guide

See https://www.apache.org/foundation/marks/guide for detailed and complete instructions

The first and most prominent mentions must use the full form of the name, "Apache Linkis", for any individual usage (webpage, handout, slides, etc.). Depending on the context and writing style, you should use the full form of the name often enough to ensure that readers clearly understand the association of both the Linkis project and the Linkis software product with the ASF as the parent organization.

Later in each specific document you may use the bare form of the name, i.e. just Linkis, as best suits your writing style.

More specifically:

For use by software vendors or software-related service providers, or when organizations or organization-branded pages are discussing Linkis (or any other Apache brand) in relation to any non-Apache provided software products or services, extra care is required to maintain the independent and vendor-neutral reputation of the Apache brand.

The full form of the name must be used in at least these cases:

  • Titles or subtitles, including web page title or description metadata.

  • The first and most prominent header elements within any major document section.

  • The first and most prominent callout, sidebar, or other types of highlighted blocks of content displayed to the user.

  • The first and most prominent uses in running or body text within the document.

  • For graphics headers or diagrams, the full form of the name must be clear in the graphic itself where practical; if not, the full form of the name must be used in a prominent caption header, or accompanying explanation of the graphic.

  • For video content, the full form of the name must be used in the title and first uses, as well as in the last use or any use in credits.

  • Proper trademark attributions must also be provided, either in page footers, or in a clearly marked Terms, Legal, Trademarks, or other commonly-named secondary page within a website.

  • For use by other kinds of users (i.e. organizations or individuals not primarily providing software products or services somehow related to the Apache software product being discussed):

  • For scholarly or academic work: Use the full form in titles, subtitles, the first and most prominent references in headers, callouts, or other highlighted sections, and the first and most prominent references in running or body text. Use of the bare form afterwards is permitted.

  • For regularly published media (books, magazines, journalism): ensure the full form is used in titles or subtitles, the first and most prominent references in headers, callouts, or other highlighted sections, and the first and most prominent references in running or body text. Otherwise, follow your normal publisher's guidelines for referring to software product names.

  • For personal bloggers or individuals: We appreciate the use of the full form of the name in titles and the first and most prominent uses in running or body text.

  • Uses of Apache brands in domain names or event names and brands are covered in their own policies.

- + \ No newline at end of file diff --git a/community/development_specification/api/index.html b/community/development_specification/api/index.html index 3a08aa98058..9160e835819 100644 --- a/community/development_specification/api/index.html +++ b/community/development_specification/api/index.html @@ -7,7 +7,7 @@ API Specification | Apache Linkis - + @@ -26,7 +26,7 @@

Convention :

  • method: Returns the requested RESTful API URL, mainly for the WebSocket mode.

  • status: Returns status information, where: -1 means not logged in, 0 means success, 1 means error, 2 means failed validation, and 3 means no access to the interface.

  • data: Returns the specific data.

  • message: Returns a prompt message for the request. If status is not 0, message returns an error message, and data may contain a stack trace field with the specific stack information.

In addition, different status values lead to different HTTP status codes. Under normal circumstances (a mapping sketch follows this list):

  • When status is 0, the HTTP status code is 200

  • When the status is -1, the HTTP status code is 401

  • When status is 1, the HTTP status code is 400

  • When status is 2, the HTTP status code is 412

  • When status is 3, the HTTP status code is 403
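
A minimal Java sketch of this status-to-HTTP mapping (an illustration of the convention above, not Linkis source code):

```java
// Maps the convention's status field to the corresponding HTTP status code.
public class StatusMapping {
    static int httpStatusOf(int status) {
        switch (status) {
            case 0:  return 200; // success
            case -1: return 401; // not logged in
            case 1:  return 400; // error
            case 2:  return 412; // failed validation
            case 3:  return 403; // no access to the interface
            default: return 500; // not specified by the convention
        }
    }
}
```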

- + \ No newline at end of file diff --git a/community/development_specification/concurrent/index.html b/community/development_specification/concurrent/index.html index 0353ddcfec9..ba33bf6da78 100644 --- a/community/development_specification/concurrent/index.html +++ b/community/development_specification/concurrent/index.html @@ -7,7 +7,7 @@ Concurrent Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Concurrent Specification

  1. [Compulsory] Ensure that obtaining a singleton object is thread-safe. Operations inside singletons must also be kept thread-safe.
  2. [Compulsory] Thread resources must be provided through a thread pool; explicitly creating threads in the application is not allowed (see the sketch after this list).
  3. SimpleDateFormat is a thread-unsafe class. It is recommended to use the DateUtils utility class instead.
  4. [Compulsory] Under high concurrency, synchronous calls should consider the performance cost of locking. If you can use lock-free data structures, don't use locks; if you can lock a block, don't lock the whole method body; if you can use an object lock, don't use a class lock.
  5. [Compulsory] Use ThreadLocal as little as possible. Every time you use a ThreadLocal that holds an object which needs to be closed, remember to close it to release the resource.
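
A short sketch of rules 2 and 3 (illustrative, not Linkis source code): take threads from an explicit pool rather than creating them directly, and use a thread-safe formatter instead of SimpleDateFormat.

```java
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ConcurrencyExample {
    // DateTimeFormatter is immutable and thread-safe, unlike SimpleDateFormat.
    private static final DateTimeFormatter FMT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss");

    public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newFixedThreadPool(4); // threads come from a pool
        for (int i = 0; i < 10; i++) {
            pool.submit(() -> System.out.println(FMT.format(LocalDateTime.now())));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
    }
}
```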
- + \ No newline at end of file diff --git a/community/development_specification/exception_catch/index.html b/community/development_specification/exception_catch/index.html index 3ab50fa3836..ef0d3455dc0 100644 --- a/community/development_specification/exception_catch/index.html +++ b/community/development_specification/exception_catch/index.html @@ -7,7 +7,7 @@ Exception Catch Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Exception Catch Specification

  1. [Mandatory] For the exceptions of each small module, a dedicated exception class should be defined to facilitate the subsequent generation of error codes for users; throwing a raw RuntimeException or Exception directly is not allowed (see the sketch after this list).
  2. Try not to try-catch a large section of code; that is irresponsible. Please distinguish between stable and unstable code when catching. Stable code is code that will not go wrong anyway. When catching around unstable code, distinguish the exception types as much as possible and then do the corresponding exception handling.
  3. [Mandatory] The purpose of catching an exception is to handle it. Don't swallow it without handling it; if you don't want to handle it, throw the exception to its caller. Note: Do not use e.printStackTrace() under any circumstances! The outermost business layer must handle exceptions and turn them into content that users can understand.
  4. The finally block must close resource objects and stream objects, and try-catch them if they may throw.
  5. [Mandatory] Prevent NullPointerException. The return value of a method may be null; it is not mandatory to return an empty collection or an empty object, but a comment must be added to fully explain under what circumstances null will be returned. The results of RPC and Spring Cloud Feign calls all require null checks.
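
A sketch of rule 1 (names are illustrative, not Linkis's actual classes): each module defines its own exception carrying an error code, instead of throwing a raw RuntimeException or Exception.

```java
// A module-specific exception with an error code for user-facing messages.
public class EntranceErrorException extends Exception {
    private final int errCode;

    public EntranceErrorException(int errCode, String desc) {
        super(desc);
        this.errCode = errCode;
    }

    public int getErrCode() {
        return errCode;
    }
}
```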
- + \ No newline at end of file diff --git a/community/development_specification/license/index.html b/community/development_specification/license/index.html index 1b7ca808687..fb8a67ce709 100644 --- a/community/development_specification/license/index.html +++ b/community/development_specification/license/index.html @@ -7,7 +7,7 @@ License Notes | Apache Linkis - + @@ -36,7 +36,7 @@ Maven repository:https://mvnrepository.com/artifact/io.etcd/jetcd-corehttps://mvnrepository.com/artifact/io.etcd/jetcd-launcher

Reference articles#

- + \ No newline at end of file diff --git a/community/development_specification/log/index.html b/community/development_specification/log/index.html index 3e0883bf858..8edd474b919 100644 --- a/community/development_specification/log/index.html +++ b/community/development_specification/log/index.html @@ -7,7 +7,7 @@ Log Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Log Specification

  1. [Convention] Linkis chooses SLF4J and Log4J2 as the log printing framework, removing the logback in the Spring Cloud package. Since SLF4J will randomly select a logging framework for binding, bridging packages such as slf4j-log4j12 must be excluded after introducing new Maven packages in the future, otherwise log printing will not work properly. However, if the newly introduced Maven package depends on a package such as Log4J, do not exclude it, otherwise the code may fail at runtime.
  2. [Configuration] The log4j2 configuration file defaults to log4j2.xml and needs to be placed in the classpath. If it needs to be combined with Spring Cloud, "logging: config: classpath:log4j2-spring.xml" (the location of the configuration file) can be added to application.yml.
  3. [Compulsory] The API of a logging framework (log4j2, Log4j, Logback) cannot be used directly in a class. For Scala code, inheriting the Logging trait is required; for Java, use LoggerFactory.getLogger(getClass()) (see the sketch after this list).
  4. [Development Convention] Since engineConn is started by engineConnManager from the command line, we specify the path of the log configuration file on the command line and also modify the log configuration during code execution; in particular, the engineConn log is redirected to the system's standard out. So the log configuration file for the EngineConn is by convention defined in the EnginePlugin and named log4j2-engineConn.xml (this is the convention name and cannot be changed).
  5. [Compulsory] Strictly differentiate log levels. Fatal-level problems should be thrown and the process exited with System.exit(-1) when the Spring Cloud application is initialized. Error-level exceptions are those that developers must care about and handle; do not use them casually. The WARN level is for user-action exceptions and for logs used to troubleshoot bugs later. INFO is for key process logs. DEBUG is for development-mode logs; write as few as possible.
  6. [Compulsory] Requirements: every module must have INFO-level logs, and every key process must have INFO-level logs. Daemon threads must have WARN-level logs for cleaning up resources, etc.
  7. [Compulsory] Exception information should include two types of information: the scene information and the exception stack. If you do not handle an exception, throw it upwards with the throw keyword. Example: logger.error(parameters/objects.toString() + "_" + e.getMessage(), e);
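
A sketch of rules 3 and 7 (illustrative class, not Linkis source code): obtain the logger through the SLF4J facade and, when logging an exception, include both the scene information and the stack trace.

```java
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LogExample {
    private static final Logger logger = LoggerFactory.getLogger(LogExample.class);

    public void run(String taskId) {
        logger.info("task {} started", taskId); // key-process INFO log
        try {
            // ... business code ...
        } catch (Exception e) {
            // scene information + exception stack, as required by rule 7
            logger.error(taskId + "_" + e.getMessage(), e);
        }
    }
}
```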
- + \ No newline at end of file diff --git a/community/development_specification/overview/index.html b/community/development_specification/overview/index.html index 5a5a3c51ad8..4aef0e1970e 100644 --- a/community/development_specification/overview/index.html +++ b/community/development_specification/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@

Overview

In order to standardize Linkis's community development environment, improve the output quality of subsequent development iterations of Linkis, and standardize the entire development and design process of Linkis, Contributors are strongly recommended to follow the development specifications below:

Note: The development specifications of the initial version of Linkis 1.0 are relatively brief and will continue to be supplemented and improved as Linkis iterates. Contributors are welcome to provide their own opinions and comments.

- + \ No newline at end of file diff --git a/community/development_specification/path_usage/index.html b/community/development_specification/path_usage/index.html index 60fe12d2b92..64674ca31c7 100644 --- a/community/development_specification/path_usage/index.html +++ b/community/development_specification/path_usage/index.html @@ -7,7 +7,7 @@ Path Usage Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Path Usage Specification

Please note: Linkis provides a unified Storage module, so you must follow the Linkis path specification when using paths or configuring them in configuration files.

  1. [Compulsory] When using a file path, whether it is local, HDFS, or HTTP, the scheme information must be included (see the sketch after this list). Among them:

    • The scheme header for local files is: file:///;

    • The scheme header for HDFS is: hdfs:///;

    • The scheme header for HTTP is: http:///.

  2. There should be no special characters in the path. Try to use combinations of English letters, underscores, and numbers.
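
A sketch of the rule above: every path carries its scheme header (the paths themselves are illustrative).

```java
public class PathExample {
    static final String LOCAL_PATH = "file:///tmp/linkis/job_0/result"; // local file
    static final String HDFS_PATH  = "hdfs:///tmp/linkis/job_0/result"; // HDFS
    static final String HTTP_PATH  = "http:///linkis-web/resource/result"; // HTTP
}
```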
- + \ No newline at end of file diff --git a/community/development_specification/programming_specification/index.html b/community/development_specification/programming_specification/index.html index dae6571072c..ff91de57615 100644 --- a/community/development_specification/programming_specification/index.html +++ b/community/development_specification/programming_specification/index.html @@ -7,7 +7,7 @@ Programming Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Programming Specification

1. Naming Convention#

  1. [Mandatory] Do not use Chinese pinyin or unintelligible abbreviations
  2. For basic Java naming conventions, please refer to naming-conventions
  3. [Constraints] There is a scalastyle style configuration file in Linkis; if code does not conform to the specification, you need to rename it according to the scalastyle style
  4. [Mandatory] Configuration files, startup files, process names, configuration keys, etc. also need to comply with naming conventions, which are as follows:
Classification | Style | Specification | Example
Configuration file | Lowercase, separated by '-' | linkis-<classification level (ps/cg/mg)>-<service name>.properties | linkis-cg-linkismanager.properties
Start-stop script | Lowercase, separated by '-' | linkis-<classification level>-<service name> | linkis-cg-linkismanager
Module directory | Lowercase, separated by '-' | The module directory must be under the corresponding classification level, with the module name as a subdirectory | linkis-public-enhancements/linkis-bml
Process naming | Camel case | Starts with Linkis and ends with the service name | LinkisBMLApplication
Configuration key naming | Lowercase, separated by '.' | linkis.<module name>.<keyName> | linkis.bml.hdfs.prefix

2. Annotation Protocol#

  1. [Mandatory] Classes, class attributes, and interface methods must be commented, and the comments must use the Javadoc specification, in the format /** content */
  2. [Mandatory] All abstract methods (including methods in interfaces) must be annotated with Javadoc. In addition to return values, parameters, and exception descriptions, they must also state what the method does and what functionality it implements (see the sketch after this list)
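
A sketch of the Javadoc convention for an abstract method (interface and method names are illustrative):

```java
public interface JobQueryService {
    /**
     * Queries the current status of a submitted job.
     *
     * @param taskId the auto-increment id of the task
     * @return the job status, e.g. "Running" or "Succeed"; never null
     * @throws IllegalArgumentException if taskId is not positive
     */
    String getStatus(long taskId);
}
```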
- + \ No newline at end of file diff --git a/community/development_specification/release-notes/index.html b/community/development_specification/release-notes/index.html index 1dce50e62bc..7851447b827 100644 --- a/community/development_specification/release-notes/index.html +++ b/community/development_specification/release-notes/index.html @@ -7,7 +7,7 @@ Release-Notes Writing Specification | Apache Linkis - + @@ -16,7 +16,7 @@ Web console WebInstall InstallInstall-Scripts Install-ScriptsInstall-SQL Install-SqlInstall-Web Install-WebCommon module Common
- + \ No newline at end of file diff --git a/community/development_specification/unit_test/index.html b/community/development_specification/unit_test/index.html index 24e011f585c..2237745ca8c 100644 --- a/community/development_specification/unit_test/index.html +++ b/community/development_specification/unit_test/index.html @@ -7,7 +7,7 @@ Test Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Test Specification

  1. [Mandatory] Tool classes and internal interfaces of services must have test cases.
  2. [Mandatory] Unit tests need to be automated (triggered by mvn compilation), independent (unit test cases cannot call each other), and repeatable (can be executed multiple times with the same result); see the sketch after this list.
  3. [Mandatory] A test case should only test one method.
  4. [Mandatory] Test case exceptions cannot be caught and need to be thrown upwards.
  5. [Mandatory] Unit test code must be written in the following project directory: src/test/java or scala; it is not allowed to be written in other directories.
  6. [Recommended] Unit tests need to consider boundary conditions, such as the end of the month and February.
  7. [Recommended] For database-related unit tests, consider data rollback.
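
A minimal sketch of these rules (illustrative JUnit 5 test, not Linkis source code): placed under src/test/java, tests one method only, considers a boundary condition (February), and throws exceptions upwards.

```java
import static org.junit.jupiter.api.Assertions.assertEquals;

import org.junit.jupiter.api.Test;

public class YearMonthBoundaryTest {
    @Test
    public void testFebruaryLengthInLeapYear() throws Exception { // not caught, thrown upwards
        assertEquals(29, java.time.YearMonth.of(2024, 2).lengthOfMonth());
    }
}
```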
- + \ No newline at end of file diff --git a/community/development_specification/version_feature_specifications/index.html b/community/development_specification/version_feature_specifications/index.html index 867407e39dd..c2eee245b10 100644 --- a/community/development_specification/version_feature_specifications/index.html +++ b/community/development_specification/version_feature_specifications/index.html @@ -7,7 +7,7 @@ Version and New Feature Specification | Apache Linkis - + @@ -15,7 +15,7 @@

Version and New Feature Specification

1. New version specification#

When you need a new version, you need to follow the steps below:

  1. [Mandatory] For a new version, a discussion among the PMC and developers must be organized; meeting minutes must be recorded and sent to the mailing list
  2. [Mandatory] The scope of the new version's feature list requires an email vote. Approval from 3+ PMC members is required, and the approval votes must outnumber the negative votes
  3. [Mandatory] After the version is voted on, the corresponding version needs to be established on GitHub Project
  4. [Mandatory] Each feature needs to send a separate mailing list to explain the design reasons and design ideas
  5. [Mandatory] Changes involving installation, database, or configuration modification need to be announced to the mailing list
  6. [Recommended] One feature corresponds to one issue corresponds to one PR
  7. [Mandatory] Each version requires CICD to pass and test cases to pass before the version can be released
  8. [Constraints] Each version needs a corresponding leader, who manages related issues and PRs, holds discussions, actively responds to emails, confirms plans, tracks progress, etc.

2. New feature specification#

When you add new features, you need to follow the steps below:

  1. [Mandatory] New features require an email vote, with the design reasons and design ideas attached
  2. [Mandatory] New features need to be added to the corresponding version in the GitHub Project
  3. [Mandatory] Changes involving installation, database, or configuration modification need to be announced to the mailing list
  4. [Mandatory] New features must add new documents
  5. [Mandatory] New features need to add corresponding unit tests, Unit Test Specification
  6. [Recommended] One feature corresponds to one issue corresponds to one PR
- + \ No newline at end of file diff --git a/community/how-to-contribute-to-website/index.html b/community/how-to-contribute-to-website/index.html index cdf82989e46..4f11c595dac 100644 --- a/community/how-to-contribute-to-website/index.html +++ b/community/how-to-contribute-to-website/index.html @@ -7,7 +7,7 @@ How to Participate in the Official Website Contribution | Apache Linkis - + @@ -22,7 +22,7 @@ After the verification is correct, the asf-staging branch can be merged to the asf-site branch. The internal mechanism of Apache will deploy the content of the asf-site branch to the formal environment. After the merge, the formal environment is considered to be updated successfully.

6 Points for attention#

  • When adding a Chinese document, you need to add the corresponding English document at the same time, otherwise the added page cannot be displayed
  • When adding a document, pay attention to the document node name and node sorting fields, as shown in the following figure
    pg-eng

7 Other#

The naming convention refers to "Alibaba Front-end Development Specification"

- + \ No newline at end of file diff --git a/community/how-to-contribute/index.html b/community/how-to-contribute/index.html index ec78a7001de..39d1540a00c 100644 --- a/community/how-to-contribute/index.html +++ b/community/how-to-contribute/index.html @@ -7,7 +7,7 @@ How to Participate in Project Contribution | Apache Linkis - + @@ -18,7 +18,7 @@ Whether it is a bug fix or a new feature development, please submit a PR to the dev-* branch.
  • PR and commit names follow the principle of <type>(<scope>): <subject>; for details, please refer to Ruan Yifeng's article Commit message and Change log writing guide.
  • If the PR contains new features, the document update should be included in this PR.
  • If this PR is not ready to merge, please add [WIP] prefix to the head of the name (WIP = work-in-progress).
  • All submissions to dev-* branches need to go through at least one review before they can be merged
  2.4 Review Standard#

    Before contributing code, you can find out what kinds of submissions are popular in review. Simply put, the more gains a submission brings and the fewer side effects or risks it carries, the higher the probability of it being merged and the faster the review will be. Submissions with high risk and low value are almost impossible to merge and may be rejected in review.

    2.4.1 Gain#

    • Fix the main cause of the bug
    • Add or fix a function or problem that a large number of users urgently need
    • Simple and effective
    • Easy to test, with test cases
    • Reduce complexity and amount of code
    • Issues that have been discussed by the community and identified for improvement

    2.4.2 Side effects and risks#

    • Only fix the surface phenomenon of the bug
    • Introduce new features with high complexity
    • Add complexity to meet niche needs
    • Change stable existing API or semantics
    • Cause other functions to not operate normally
    • Add a lot of dependencies
    • Change the dependency version at will
    • Submit a large number of codes or changes at once

    2.4.3 Reviewer notes#

    • Please use a constructive tone to write comments
    • If you need to make changes by the submitter, please clearly state all the content that needs to be modified to complete the Pull Request
    • If a PR is found to have introduced new problems after merging, the Reviewer needs to contact the PR author to communicate and solve the problem; if the PR author cannot be contacted, the Reviewer needs to revert the PR

    3. Outstanding Contributor#

    3.1 About Committers (Collaborators)#

    3.1.1 How to become Committer#

    If you have submitted a valuable PR to Linkis that has been merged, or have contributed continuously for more than half a year and led the release of at least one version, you can find a PMC member of the Linkis project through the official WeChat group. If they are willing to nominate you as a committer and to state your contributions to all PMC members and Committers, a vote will be initiated; the PMC and other Committers will then vote together to decide whether to admit you. If you get enough votes, you will become a Committer of the Linkis project.

    3.1.2 Rights of Committers#

    • You can join the official developer WeChat group to participate in discussions and formulate Linkis development plans
    • Can manage Issues, including closing and adding tags
    • Can create and manage project branches, except for master and dev-* branches
    • You can review the PR submitted to the dev-* branch
    • Can apply to become a Committee member

    3.2 About Committee#

    3.2.1 How to become a Committee member#

    If you are a Committer of the Linkis project and all your contributions have been recognized by the Committee members, you can apply to become a member of the Linkis Committee; the other Committee members will vote together to decide whether to admit you. If the vote passes unanimously, you will become a member of the Linkis Committee.

    3.2.2 Rights of Committee members#

    • You can merge PRs submitted by other Committers and contributors to the dev-* branch
    • Participate in determining the roadmap and development direction of the Linkis project
    • Can participate in the new version release
    - + \ No newline at end of file diff --git a/community/how-to-email/index.html b/community/how-to-email/index.html index 48dbb8522b0..02f8000bcd4 100644 --- a/community/how-to-email/index.html +++ b/community/how-to-email/index.html @@ -7,7 +7,7 @@ How to Use Email List | Apache Linkis - + @@ -15,7 +15,7 @@

    How to Use Email List

    Introduces mailing list usage guidelines and reference examples

    To subscribe to the mailing list, please refer to this Subscription Guidelines

    Linkis' archived mail can be viewed here: archived mail

    1. Themes#

    The subject of an email can be roughly divided into the following types:

    • [DISCUSS] Discussion on a feature/function/logic modification/CI/CD, which can be an implementation/design/optimization suggestion, etc.
    • [PROPOSAL] Proposals, such as adding or removing certain features; not much different from [DISCUSS]
    • [VOTE] Votes on changes, electing Committers, electing new PPMC members, etc. For example, each version release is voted on in the community dev mailing list; multi-option polls are also possible.
    • [ANNOUNCE] Announcements, such as the completed release of a new version or newly elected Committers/PPMC members
    • [NOTICE] Mainly used for temporary notices, such as the community sandbox environment being suspended for maintenance/upgrade or the official website being abnormally unavailable, as well as announcements of online/offline weekly meetings, exchange meetings, and various events
    • [HELP] Requests for help. There are many git code notifications, so they sometimes cannot be checked in time; in addition, GitHub network access is limited in some regions, and some users may not be able to submit issues through GitHub smoothly. A request initiated by email is easier to identify and notice.
    • [VOTE][RESULT] Announce the results of the release vote

    2. Mail Specifications#

    General specification:
    • Whenever possible, send plain text rather than HTML messages. If you use a QQ mailbox, its email content is in HTML format by default; please switch to plain text format when writing. For detailed switching instructions, see the appendix of this article.
    • When developers/community users/PPMC members initiate email discussions, requests for help, or notifications for the scenarios above, please send them to dev@linkis.apache.org
    • Please put the corresponding type prefix before the email subject, such as [HELP] XXXXXXX or [DISCUSS] XXXXXXX

    For more information, please refer to the official Mail Etiquette https://infra.apache.org/contrib-email-tips

    [DISCUSS/Proposal] Mail

    • Title: [DISCUSS][module name] XXXX (if a specific module is involved, it is recommended to include the module name)
    • Generally, create a corresponding issue in GitHub's issues column first, and then initiate the email discussion
    • Briefly and clearly describe the content of the discussion/proposal (e.g. background, what problem you want to solve, how to solve it)
    • List the modules involved (if one or two specific modules are involved)
    • Graphic and textual information such as relevant design notes can be placed in the corresponding issue for easy modification, and the link can be quoted in the email
    • A corresponding Chinese translation can be attached

    [HELP] Mail

    3. Sample reference#

    [DISCUSS/Proposal] Example

    [VOTE] Example

    [ANNOUNCE] Example

    [NOTICE] Example

    [HELP] Example

    4. Mail usage of PPMC#

    From determining a version to releasing it, the following common scenarios may involve using email:

    1. For a new version, PMC members and developers need to be organized to discuss it; meeting minutes must be recorded, and the function points of the version, the planned release time, and the release manager must be determined. The meeting minutes are then sent to the private@linkis.apache.org mailing list.
    2. For the scope of the new version's feature list, a voting email needs to be sent to dev@linkis.apache.org; approval from 3+ PMC members is required, and the yes votes must outnumber the no votes.
    3. For the weekly regular meeting you host, a meeting invitation reminder email needs to be sent before the meeting, and the meeting minutes email should be sent to dev@linkis.apache.org after the meeting.
    4. Votes for new committers/PPMC members need to be sent to private@linkis.apache.org. See https://community.apache.org/newcommitter.html for the new committer/PPMC selection process.

    5. How to Reply to Version Release Voting Emails#

    If a release vote is initiated, after verification (see How to verify for the detailed verification process), you can refer to this reply example to reply to the email.

    If you initiate a release vote, you can refer to this response example to reply to the emails after verification.

    When replying to the email, you must include the information you have checked yourself; simply replying `+1 approve` is invalid.

    It is best for PPMC/IPMC members to vote with the binding suffix, indicating a binding vote, which makes it easier to count the voting results.

    Non-PPMC/Non-IPMC member

    +1 (non-binding)
    I checked:
    1. All download links are valid
    2. Checksum and signature are OK
    3. LICENSE and NOTICE exist
    4. Build successfully on macOS (Big Sur)
    5.

    PPMC/IPMC member

    +1 (binding)
    I checked:
    1. All download links are valid
    2. Checksum and signature are OK
    3. LICENSE and NOTICE exist
    4. Build successfully on macOS (Big Sur)
    5.

    6. Appendix#

    QQ mailbox switch to plain text format

    image

    - + \ No newline at end of file diff --git a/community/how-to-participate-in-developer-meetings/index.html b/community/how-to-participate-in-developer-meetings/index.html index f957429e4ca..683b6d7c154 100644 --- a/community/how-to-participate-in-developer-meetings/index.html +++ b/community/how-to-participate-in-developer-meetings/index.html @@ -7,7 +7,7 @@ How to Participate in Developer Meetings | Apache Linkis - + @@ -16,7 +16,7 @@ It is strongly recommended to subscribe to the Apache mail to keep abreast of the latest developments in the community. Subscription strategy: https://linkis.apache.org/zh-CN/community/how-to-subscribe

    Regular meeting documentation#

    1. "Documents on Weekly Meeting Issues": Record the weekly meeting time, host, issues and other information;
    2. "Linkis Incubation Progress": record information such as Linkis version progress, development specifications, community management, etc.
    3. "Question Collection": Record community developers' questions, answers and other information

    Regular meetings held#

    - + \ No newline at end of file diff --git a/community/how-to-release/index.html b/community/how-to-release/index.html index b8d854b4a0c..951b68061ba 100644 --- a/community/how-to-release/index.html +++ b/community/how-to-release/index.html @@ -7,7 +7,7 @@ How to Release | Apache Linkis - + @@ -120,7 +120,7 @@ For more information, please refer to the official Email Specification https://infra.apache.org/contrib-email-tips

    Gmail mailbox switch to plain text format

    image

    QQ mailbox switch to plain text format

    image

    - + \ No newline at end of file diff --git a/community/how-to-sign-apache-icla/index.html b/community/how-to-sign-apache-icla/index.html index 0ae18fefbec..77d84a50d6c 100644 --- a/community/how-to-sign-apache-icla/index.html +++ b/community/how-to-sign-apache-icla/index.html @@ -7,7 +7,7 @@ ICLA Signing Process | Apache Linkis - + @@ -18,7 +18,7 @@ Hello Apache Incubator: I have accepted the Apache Linkis(Incubator) PPMC invitation to become linkis committer, the attachment is my ICLA information.Thanks!

    Here is a specific example:

    example

    6. After the email is sent successfully, wait for the official community to confirm

    Manual signature and PDF software signature Demo#

    PDF online signature

    • Download the PDF source file
    • Fill in the items and personal information
    • Open the PDF online-signing URL
    • Sign
    • Save and download the signed PDF file
    • Send it to the specified mailbox

    Handwritten signature

    • Download the PDF source file
    • Fill in items and personal information
    • Print documents
    • Handwritten signature
    • Convert the photos into a single PDF file
    • Send to the specified mailbox

    For example files, please refer to https://github.com/casionone/incubator-linkis-website/tree/dev/resource/wangming-icla.pdf

    - + \ No newline at end of file diff --git a/community/how-to-subscribe/index.html b/community/how-to-subscribe/index.html index eaf5835b4cd..00fae2e12a6 100644 --- a/community/how-to-subscribe/index.html +++ b/community/how-to-subscribe/index.html @@ -7,7 +7,7 @@ How to Subscribe | Apache Linkis - + @@ -15,7 +15,7 @@

    How to Subscribe

    Apache has configured a series of mailing lists for each project. Mailing lists are an important form of communication in the Apache community.

    Many things in the daily operation and maintenance of the community are carried out on mailing lists, such as technical discussions, ideas or suggestions, project questions and answers, decisions and notifications about new functions/features/major changes, version release voting, etc. As long as it is related to the project, you can initiate a discussion here.

    As long as you subscribe to this mailing list, you can promptly learn the latest developments in the Linkis community and keep pace with the community.

    Linkis project mailing list

    Name | Description | Subscribe | Unsubscribe | Archive
    dev@linkis.apache.org | Community activity information | subscribe | unsubscribe | archive
    commits@linkis.apache.org | Code repository update information | subscribe | unsubscribe | archive

    1. Subscribe to the mailing list#

    As an example, take subscribing to the dev@linkis.apache.org mailing list.

    The steps are as follows:

    1. Send an email without any content or subject to: dev-subscribe@linkis.apache.org
    2. Wait until you receive an email with the subject line confirm subscribe to dev@linkis.apache.org (if you do not receive it for a long time, please check whether the email was intercepted by your mailbox; if it was not intercepted and no reply arrives for a long time, return to step 1)
    3. Reply directly to that email without changing the subject line or adding any content.
    4. Wait until you receive an email with the subject line WELCOME to dev@linkis.apache.org.
    5. Once you receive the email from step 4, you have successfully subscribed. To initiate a discussion, you can send an email directly to dev@linkis.apache.org, and it will be sent to everyone who subscribes to the mailing list.

    2. Unsubscribe from the mailing list#

    The steps for unsubscribing from a mailing list are similar to those for subscribing:

    1. Send an email without any content or subject to: dev-unsubscribe@linkis.apache.org
    2. Wait until you receive an email with the subject line confirm unsubscribe from dev@linkis.apache.org
    3. Reply directly to the email without changing the subject line or adding the email content
    4. Wait until you receive an email with the subject line GOODBYE from dev@linkis.apache.org
    5. Unsubscription succeeded

    3. Issues related#

    Linkis's issues address https://github.com/apache/incubator-linkis/issues

    For new problems/ideas, you can create a new issue through [new issues] and describe the issue in as much detail as possible, so that community members can follow up, discuss, and solve it. For any problem with the project, it is recommended to create an issue first to record and follow up, so that the entire process is preserved and archived, making it easier for later users to search.

    For existing issues, if you are interested, feel free to reply and join the discussion. For task/bug-type issues, if you are interested, you can follow them or participate in the task directly. Community partners are very welcome to contribute their efforts to Linkis.

    - + \ No newline at end of file diff --git a/community/how-to-verify/index.html b/community/how-to-verify/index.html index f50227010c2..ccfaf913f9d 100644 --- a/community/how-to-verify/index.html +++ b/community/how-to-verify/index.html @@ -7,7 +7,7 @@ How to Verify | Apache Linkis - + @@ -26,7 +26,7 @@

    2.4.4 Check related compliance items#

    and check as follows:

    • Check whether the source package contains unnecessary files, which makes the tar package too large
    • Folder contains the word incubating
    • There are LICENSE and NOTICE files
    • There is a DISCLAIMER or DISCLAIMER-WIP file
    • The year in the NOTICE file is correct
    • Only text files exist, not binary files
    • All files have the ASF license header at the beginning
    • Able to compile correctly
    • Check for extra files or folders, such as empty folders, etc.
    • .....

    2.5 Check the binary package#

    If binary/web-binary packages were uploaded, check the binary packages.

    Unzip apache-linkis-${release_version}-incubating-bin.tar.gz

    $ mkdir apache-linkis-${release_version}-incubating-bin
    $ tar -xvf apache-linkis-${release_version}-incubating-bin.tar.gz -C apache-linkis-${release_version}-incubating-bin
    $ cd apache-linkis-${release_version}-incubating-bin

    and check as follows:

    • Folder contains the word incubating
    • There are LICENSE and NOTICE files
    • There is a DISCLAIMER or DISCLAIMER-WIP file
    • The year in the NOTICE file is correct
    • All text files have the ASF license header at the beginning
    • Check the third-party dependent license:
    • Compatible with third-party dependent licenses
    • All third-party dependent licenses are named in the LICENSE file
    • If a dependency is under the Apache license and has a NOTICE file, that NOTICE file also needs to be merged into the release's NOTICE file
    • .....

    You can refer to this article: ASF Third Party License Policy

    3. Email reply#

    If a release vote has been initiated, you can refer to this response example to reply to the email after verification.

    When replying to the email, you must include the information you have checked yourself; simply replying `+1 approve` is invalid.

    When the PPMC votes in the dev@linkis.apache.org Linkis community, please add the binding suffix to indicate that the vote is binding in the Linkis community, which makes it easier to count the voting results.

    When the IPMC votes in the general@incubator.apache.org incubator community, please add the binding suffix to indicate that the vote is binding in the incubator community, which makes it easier to count the voting results.

    note

    If you have already voted on dev@linkis.apache.org, you can bring your vote directly to the incubator community when you reply, for example:

    // When voting in the incubator community, only IPMC members' votes are binding; PPMC members should note the binding change
    Forward my +1 from dev@linkis (non-binding)
    Copy my +1 from linkis DEV ML (non-binding)

    Non-PPMC/Non-IPMC member

    +1 (non-binding)
    I checked:
    1. All download links are valid
    2. Checksum and signature are OK
    3. LICENSE and NOTICE exist
    4. Build successfully on macOS (Big Sur)
    5.

    PPMC/IPMC member

    // When voting in the incubator community, only IPMC members' votes are binding
    +1 (binding)
    I checked:
    1. All download links are valid
    2. Checksum and signature are OK
    3. LICENSE and NOTICE exist
    4. Build successfully on macOS (Big Sur)
    5.

    4. Precautions#

    If you have Maven installed, you can replace ./mvnw or mvnw.cmd with your own mvn command

    mvnw is short for Maven Wrapper. It supports running a Maven project without installing Maven and configuring environment variables; if Maven cannot be found, it downloads the corresponding Maven version according to the configuration file

    - + \ No newline at end of file diff --git a/community/how-to-vote-a-committer-ppmc/index.html b/community/how-to-vote-a-committer-ppmc/index.html index 6828e32044d..9bf63deabbd 100644 --- a/community/how-to-vote-a-committer-ppmc/index.html +++ b/community/how-to-vote-a-committer-ppmc/index.html @@ -7,7 +7,7 @@ How to Vote New Committer/PPMC | Apache Linkis - + @@ -42,7 +42,7 @@ Thanks!The Apache Linkis(Incubating) PPMC

    At this point, the whole process is over, and the candidate officially becomes a Committer or PPMC member of the project.

    - + \ No newline at end of file diff --git a/community/how-to-write-unit-test-code/index.html b/community/how-to-write-unit-test-code/index.html index 09d8f156631..413a97a1b75 100644 --- a/community/how-to-write-unit-test-code/index.html +++ b/community/how-to-write-unit-test-code/index.html @@ -7,7 +7,7 @@ How to Write Unit Test Code | Apache Linkis - + @@ -48,7 +48,7 @@ #配置mybatis-plus的mapper信息 因为使用的是mybatis-plus,使用mybatis-plusmybatis-plus.mapper-locations=classpath:org/apache/linkis/jobhistory/dao/impl/JobDetailMapper.xml,classpath:org/apache/linkis/jobhistory/dao/impl/JobHistoryMapper.xmlmybatis-plus.type-aliases-package=org.apache.linkis.jobhistory.entitymybatis-plus.configuration.log-impl=org.apache.ibatis.logging.stdout.StdOutImpl

    For assertions on a List, use stream predicates. Writing specification (see the sketch after this list):

    1. Use @Transactional and @Rollback to implement data rollback and avoid data pollution
    2. Each DaoTest should have a public method for creating and initializing data (or import data via CSV) to prepare data; related query, update, and delete operations should call this public method first to prepare the data
    3. When creating test data, if an attribute value is an auto-increment ID, it should not be assigned
    4. The test data created should be as consistent with real sample data as possible
    5. When testing data updates, if the field allows, please prefix the value with 'modify original value'
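
A sketch of rules 1-3 (class, entity, and mapper names are illustrative; insert/selectById are the standard mybatis-plus BaseMapper methods): @Transactional plus @Rollback avoids data pollution, and a public method prepares the test data without assigning the auto-increment id.

```java
import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.annotation.Rollback;
import org.springframework.transaction.annotation.Transactional;

@SpringBootTest
@Transactional
@Rollback
public class JobHistoryMapperTest {
    @Autowired
    private JobHistoryMapper jobHistoryMapper;

    // Public method for creating and initializing data; the auto-increment id is not set.
    public JobHistory prepareJobHistory() {
        JobHistory job = new JobHistory();
        job.setSubmitUser("hadoop"); // keep close to real sample data
        jobHistoryMapper.insert(job);
        return job;
    }

    @Test
    public void testSelectById() {
        JobHistory job = prepareJobHistory();
        assertNotNull(jobHistoryMapper.selectById(job.getId()));
    }
}
```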
    - + \ No newline at end of file diff --git a/community/microservice-division/index.html b/community/microservice-division/index.html index 106aa7d2e28..32232644643 100644 --- a/community/microservice-division/index.html +++ b/community/microservice-division/index.html @@ -7,7 +7,7 @@ Division of Microservices | Apache Linkis - + @@ -15,7 +15,7 @@

    Division of Microservices

    Introduction to service#

    Linkis is developed based on the microservice architecture, and its services can be divided into 3 types of service groups (groups): the computation governance service group, the public enhancement service group, and the microservice governance service group.

    • Computation Governance Services: The core service for processing tasks, supporting the 3 main stages of the computing task/request processing flow (submit->prepare->execute);
    • Public Enhancement Services: Provide basic support services, including context services, engine/UDF material management services, historical task and other public services, and data source management services;
    • Microservice Governance Services: Customized Spring Cloud Gateway, Eureka. Provides a base for microservices.

    The microservices included in each service group (group) are as follows:

    Group | Service name | Main functions
    MGS | linkis-mg-eureka | Responsible for service registration and discovery; other upstream components, such as DSS, also reuse the Linkis registry
    MGS | linkis-mg-gateway | The gateway entrance of Linkis, mainly responsible for request forwarding and user access authentication
    CGS | linkis-cg-entrance | The task submission entrance, responsible for receiving, scheduling, and forwarding execution requests and for the life cycle management of computing tasks; it can return results, logs, and progress to the caller
    CGS | linkis-cg-linkismanager | Provides AppManager (application management), ResourceManager (resource management), and LabelManager (label management) capabilities
    CGS | linkis-cg-engineconnplugin | The engine connector plugin, which provides basic support for freely extending Linkis engines; by implementing the established plugin interfaces, new engines can be introduced into the execution life cycle of the computing middleware and deployed rapidly
    CGS | linkis-cg-engineconnmanager | The manager of EngineConn, which provides engine life cycle management and reports load information and its own health status to the ResourceManager
    CGS | linkis-cg-engineconn | The actual connection service to the underlying computing/storage engines (Hive/Spark); it holds the session information with the actual engine and acts as a client of the underlying engine
    PES | linkis-ps-publicservice | Provides unified configuration management, context service, BML material library, data source management, microservice management, historical task query, and other functions for other microservice modules
    PES | linkis-ps-cs | Context service, which enables data and information sharing across multiple services within a data application development process
    PES | linkis-ps-metadatamanager | Provides only metadata query services: basic queries of database metadata, exposing an HTTP interface externally and an RPC service internally, so that the data source management module can run data source connection tests via RPC
    PES | linkis-ps-data-source-manager | Data source management service: performs basic management of data sources and provides HTTP interfaces for adding, querying, modifying, and connection-testing external data sources; it provides an RPC service internally, so that the metadata management module can query via RPC the information needed to establish a database connection

    Basic terms explained#

    Abbreviation | Full name in English | Meaning
    CG/cg | Computation Governance | Computation governance
    MG/mg | Microservice Governance | Microservice governance
    PS/ps | Public Service | Public service
    CS/cs | Context Service | Unified context
    DSS/dss | DataSphere Studio | Data application integrated development framework
    EC/ec | EngineConn | Engine connector
    ECM/ecm | EngineConnManager | Management of engine connectors
    ECP/ecp | EngineConnPlugin | Engine connector plugin
    RM/rm | ResourceManager | Resource manager for managing node resources
    PES/pes | Public Enhancement Services | Public enhancement services
    DMS/dms | Data Source Manager Service | Data source management service
    MDS/mds | MetaData Manager Service | Metadata management service
    BML/bml | BigData Material Library | Big data material library
    UJES | Unified Job Execute Service | Unified job execute service
    DDL/ddl | Data Definition Language | Data definition language
    DML/dml | Data Manipulation Language | Data manipulation language
    - + \ No newline at end of file diff --git a/community/ppmc-related-permission-configuration/index.html b/community/ppmc-related-permission-configuration/index.html index 3ddf16af335..3f8279aca04 100644 --- a/community/ppmc-related-permission-configuration/index.html +++ b/community/ppmc-related-permission-configuration/index.html @@ -7,7 +7,7 @@ PPMC/Committer Related Permission Configuration | Apache Linkis - + @@ -27,7 +27,7 @@ Every PPMC member is required to subscribe to the following mailing lists:

    Name | Description | Subscribe | Unsubscribe | Archive
    dev@linkis.apache.org | Linkis community activity information, project discussion announcements, etc. | Subscribe | unsubscribe | archive
    private@linkis.apache.org | This mailing list is private, visible only inside the PPMC, mainly for internal discussions | [Subscribe](mailto:private-subscribe@linkis.apache.org) | unsubscribe | archive
    general@incubator.apache.org | Public emails from the incubator community, mainly related to incubation projects | [Subscribe](mailto:general-subscribe@incubator.apache.org) | unsubscribe | archive

    Subscription operations can be found in the guide Subscribe to the mailing list

    note

    Note: private@linkis.apache.org subscriptions need to be reviewed by the mail moderator (shaofengshi@apache.org), so please attach your name to the mail content when subscribing, for the moderator's review.

    If the above subscription is unsuccessful, you can try to use the web-side tool: https://whismy.apache.org/committers/subscribe.cgi

    image

    PPMC members can view mailing list subscriptions here: https://whismy.apache.org/roster/ppmc/linkis

    - + \ No newline at end of file diff --git a/community/security/index.html b/community/security/index.html index 4c8665ecd4e..804935bc4ec 100644 --- a/community/security/index.html +++ b/community/security/index.html @@ -7,7 +7,7 @@ Security | Apache Linkis - + @@ -15,7 +15,7 @@

    Security

    The Apache Software Foundation takes a rigorous stance on eliminating security issues in its software projects. Likewise, Apache Linkis is also vigilant and takes security issues related to its features and functionality into the highest consideration.

    If you have any concerns regarding Linkis’s security, or you discover a vulnerability or potential threat, please don’t hesitate to get in touch with the Apache Security Team by dropping an email at security@apache.org.

    Please specify the project name as "Linkis" in the email, and provide a description of the relevant problem or potential threat. You are also urged to recommend how to reproduce and replicate the issue.

    The Apache Security Team and the Linkis community will get back to you after assessing and analyzing the findings.

    Please note that the security issue should be reported on the security email first, before disclosing it on any public domain.

    - + \ No newline at end of file diff --git a/community/site-map/index.html b/community/site-map/index.html index 090c9c8abfc..4d6cec09d6b 100644 --- a/community/site-map/index.html +++ b/community/site-map/index.html @@ -7,7 +7,7 @@ sitemap | Apache Linkis - + @@ -19,7 +19,7 @@ \[Filters] is:pr is:closed closed:>2021-08-02 https://github.com/apache/incubator-linkis/issues?q=is%3Aissue+is%3Aclosed+closed%3A%3E2021-08-02
    - + \ No newline at end of file diff --git a/docs/0.11.0/api/login_api/index.html b/docs/0.11.0/api/login_api/index.html index 2359c6db6e9..f79bdeba137 100644 --- a/docs/0.11.0/api/login_api/index.html +++ b/docs/0.11.0/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -16,7 +16,7 @@ -Heartbeat

    4 Interface details#

    4.1 Login#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Return to example
        {        "method": "/api/rest_j/v1/user/login",        "status": 0,        "message": "OK",        "data": {            "isAdmin": false,            "loginNum": 5,            "userName": "enjoyyin",            "lastLoginTime": 1722222222222        }      }

    4.2 Logout#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Response example

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successfully!"    }

    4.3 Heartbeat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Response example

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintaining the heartbeat success!"    }
    - + \ No newline at end of file diff --git a/docs/0.11.0/api/rest_api/index.html b/docs/0.11.0/api/rest_api/index.html index 57e5b574ce0..ea3b46ad373 100644 --- a/docs/0.11.0/api/rest_api/index.html +++ b/docs/0.11.0/api/rest_api/index.html @@ -7,7 +7,7 @@ Restful Api | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/0.11.0/api/web_socket/index.html b/docs/0.11.0/api/web_socket/index.html index eaf8db5cf37..629c5aed02a 100644 --- a/docs/0.11.0/api/web_socket/index.html +++ b/docs/0.11.0/api/web_socket/index.html @@ -7,7 +7,7 @@ WebSocket | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    WebSocket

    Linkis provides WebSocket access; the front end can interact with Linkis in real time over WebSocket without polling through the RESTful API.

    1 Linkis interface specification#

    Linkis defines its own set of interface specifications for front-end and back-end interaction.

    If you are interested in the interface specifications, please click here to view them.

    2 WebSocket Interface Summary#

    We provide the following interfaces to help users quickly submit jobs for execution.

    • Create a WebSocket connection
    • Submit for execution
    • The server actively returns status, logs, and progress

    3 Interface Details#

    3.1 Establishing a connection#

    This interface is intended to create a WebSocket connection with Linkis (a client sketch follows the items below).

    • /api/rest_j/entrance/connect

    • Request Method GET

    • Response status code 101
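
A hedged sketch of opening this connection with the standard JSR-356 client API (the gateway address, the ws:// URL, and the abbreviated payload are assumptions for illustration; the full request JSON is shown in 3.2):

```java
import java.net.URI;

import javax.websocket.ClientEndpoint;
import javax.websocket.ContainerProvider;
import javax.websocket.OnMessage;
import javax.websocket.Session;
import javax.websocket.WebSocketContainer;

@ClientEndpoint
public class LinkisWsClient {

    @OnMessage
    public void onMessage(String message) {
        // Status, log and progress events are pushed here by the server.
        System.out.println("pushed from server: " + message);
    }

    public static void main(String[] args) throws Exception {
        WebSocketContainer container = ContainerProvider.getWebSocketContainer();
        Session session = container.connectToServer(LinkisWsClient.class,
                URI.create("ws://127.0.0.1:8088/api/rest_j/entrance/connect")); // assumed address
        // Once connected, execution requests are sent as JSON text frames (payload abbreviated).
        session.getBasicRemote().sendText("{\"method\":\"/api/rest_j/v1/entrance/execute\"}");
    }
}
```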

    3.2 Submit for execution#

    This interface submits the user's job to Linkis for execution.

    • Interface /api/rest_j/entrance/execute

    • Submit Method POST

    • Request JSON Example

    {    "method":"/api/rest_j/v1/entrance/execute",    "data":{        "params": {            "variable":{                "k1":"v1"            },            "configuration":{                "special":{                    "k2":"v2"                },                "runtime":{                    "k3":"v3"                },                "startup":{                    "k4":"v4"                }            }        },        "executeApplicationName":"spark",        "executionCode":"show tables",        "runType":"sql",        "source":{            "scriptPath": "/home/Linkis/Linkis.sql"        },    "websocketTag":"37fcbd8b762d465a0c870684a0261c6e"    }}
    • The parameters in the request body's data are described below.
    Parameter name | Definition | Type | Remarks
    executeApplicationName | The engine service the user expects to use, such as Spark or Hive | String | Must not be empty
    requestApplicationName | Name of the system launching the request | String | May be empty
    params | User-specified parameters for running the service program | Map | Required, but the values inside may be empty
    executionCode | The execution code submitted by the user | String | Must not be empty
    runType | The run type, such as python, R, or SQL, when the user runs an engine such as Spark | String | Must not be empty
    scriptPath | The storage path of the user-submitted code script | String | For IDE requests, it and executionCode cannot both be empty
    • Response example
    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "The request was executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_johnnwang_21",   "taskID": "123"   }}
    • execID is the unique execution ID generated for a task after it is submitted to UJES. It is a String that is only useful while the task is running, similar to the concept of a PID. It is composed of length prefixes followed by ${requestApplicationName}${executeApplicationName}${entranceInstance ip+port}${umUser}${index}.
    • taskID is the unique ID representing the submitted task. This ID is generated by database auto-increment and is of type Long.

    3.3 Task Status, Logs, Progress Proactive Push#

    Once execution starts, the server actively pushes information such as status, logs, and progress. You can also use WebSocket to query status, logs, and progress.

    The server actively pushes the following content:

    • Logs
    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",    "log": ["error","warn","info", "all"],  "taskID":28594,    "fromLine": 56  },  "websocketTag":"37fcbd8b762d465a0c870684a0261c6e"}
    • Status
    LOD  "method": "/api/res_j/v1/entrance/${execID}/status",  "status": 0,  "message": "Return status information",  "data": {    "execID": "${execID}",    "taskID":28594,      "status": "Running",  },  "websocketTag": "37fcbd8b762d465a0c860684a0261c6e"}
    • Progress
    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return progress information",  "data": {    "execID": "${execID}",    "taskID":28594,    "progress": 0.2,    "progressInfo": [        {            "id": "job-1",            "succeedTasks": 2,            "failedTasks": 0,            "runningTasks": 5,            "totalTasks": 10        },        {            "id": "job-2",            "succeedTasks": 5,            "failedTasks": 0,            "runningTasks": 5,            "totalTasks": 10        }    ]  },  "websocketTag":"37fcbd8b762d465a0c870684a0261c6e"}
    - + \ No newline at end of file diff --git a/docs/0.11.0/architecture/commons/real-time_log_push/index.html b/docs/0.11.0/architecture/commons/real-time_log_push/index.html index d377a132a1f..97745a20be1 100644 --- a/docs/0.11.0/architecture/commons/real-time_log_push/index.html +++ b/docs/0.11.0/architecture/commons/real-time_log_push/index.html @@ -7,7 +7,7 @@ Asynchronous Log Live Push | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Asynchronous Log Live Push

    Interservice Asynchronous Push Scheme

    1 Background#

    With the adoption of a microservice architecture, multiple microservices are deployed on different servers, and the logs each microservice generates are scattered across those servers. While an ELK stack can filter the logs a user wants to view, it lacks real-time delivery and customization.

    If logs are first written to disk and only then collected, it becomes difficult to classify them by the user's run information: writing to disk adds latency, and the user context available during the run is lost once the log has been flushed to disk.

    2 Ideas#

    Design a custom Appender based on log4j2 and add it to each microservice's log configuration; then use the listener design pattern and RPC service calls to push logs in real time.

    Interservice asynchronous log push scheme

    3 Implementation#

    3.1 Design implementation of Appender components#

    Popular backend log stacks use slf4j as the logging facade, with logback or log4j2 as the implementation; open source projects now tend to prefer log4j2 as the log-printing framework, owing to its performance advantages and its open source license.

    An Appender is an output target (destination) for log4j2 logs. Once a log event is generated while the microservice is running, every Appender registered in the configuration receives that log event.

    The Appender designed by Linkis caches each acquired log event in a queue as soon as the microservice generates it, and a listener is registered inside the Appender.

    3.2 The design implementation of listeners#

    The listener pattern is a common design pattern and a common way of implementing asynchronous callbacks.

    The listener listens on the log event queue; when the queue is full, the logs are drained from the queue and sent out via HTTP.

    The listener also listens to the task state: after the task has completed all of its execution steps in this microservice, all logs still cached in the queue must be sent, to prevent log loss.

    3.3 Design implementation for the cache queue#

    The cache queue exists because pushing every log event to the log-collecting microservice as soon as it is generated would make HTTP requests far too frequent; events must therefore be cached, and the cache queue is typically designed as a blocking queue with a maximum capacity.
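    To make sections 3.1-3.3 concrete, here is a minimal hedged sketch of such an Appender with a bounded cache queue, written against log4j2's plugin API; the plugin name, queue capacity, flush trigger and the omitted HTTP transport are illustrative assumptions rather than Linkis's actual implementation.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.BlockingQueue;

    import org.apache.logging.log4j.core.Filter;
    import org.apache.logging.log4j.core.LogEvent;
    import org.apache.logging.log4j.core.appender.AbstractAppender;
    import org.apache.logging.log4j.core.config.Property;
    import org.apache.logging.log4j.core.config.plugins.Plugin;
    import org.apache.logging.log4j.core.config.plugins.PluginAttribute;
    import org.apache.logging.log4j.core.config.plugins.PluginFactory;

    @Plugin(name = "RpcLogAppender", category = "Core", elementType = "appender")
    public final class RpcLogAppender extends AbstractAppender {
        // Bounded cache queue (3.3): avoids one HTTP request per log event.
        private final BlockingQueue<String> cache = new ArrayBlockingQueue<>(512);

        private RpcLogAppender(String name, Filter filter) {
            super(name, filter, null, true, Property.EMPTY_ARRAY);
        }

        @Override
        public void append(LogEvent event) {
            String line = event.getMessage().getFormattedMessage();
            if (!cache.offer(line)) {   // queue full: flush (3.2), then re-cache
                flush();
                cache.offer(line);
            }
        }

        // Called by the listener when the queue is full or the task finishes.
        public void flush() {
            List<String> batch = new ArrayList<>();
            cache.drainTo(batch);
            // send the batch to the log-collecting microservice via HTTP/RPC
            // (transport omitted in this sketch)
        }

        @PluginFactory
        public static RpcLogAppender createAppender(
                @PluginAttribute("name") String name) {
            return new RpcLogAppender(name, null);
        }
    }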

    3.4 Logging collection#

    Logs are sent to the log-collecting microservice, which sorts and encapsulates them and pushes them to the user interface; it also asynchronously backs up a task log for each user.

    Version: 0.11.0

    RPC Architecture

    1 Background#

    Feign-based HTTP calls between microservices only allow a microservice A instance to randomly select an instance of microservice B according to simple rules; if that B instance wants to asynchronously return information to the caller, this is simply impossible to achieve.

    At the same time, because Feign only supports simple service-selection rules, it cannot forward a request to a specified microservice instance, nor broadcast a request to all instances of the receiving microservice.

    2 Introduction#

    Linkis has implemented a set of its own underlying RPC communication scheme based on Feign.

    As the underlying communication solution, Linkis RPC integrates the SDK into the microservices in need.

    A microservice can be both a request caller and a request receiver.

    As a request caller, a microservice uses a Sender to call the Receiver of the target microservice. As a request receiver, it provides a Receiver to process requests sent by the caller's Sender, completing either a synchronous or an asynchronous response.

    Linkis RPC architecture diagram

    3 Implementation#

    The overall structure of Linkis RPC is formed from the Sender system on the requesting side and the Receiver system on the receiving side.

    Linkis RPC detailed architecture diagram

    3.1 Sending end#

    As the underlying communication layer, Linkis RPC does not require users to write any actual code on the sending end.

    1) The user obtains a Sender by calling the SDK provided by Linkis RPC, using the microservice name (Service Name) or specifying the microservice instance (microservice name + IP and port of the microservice instance).

    Sender provides usable methods, see the following pseudo code:

    abstract class Sender {
        Object ask(Object message);
        Object ask(Object message, Duration timeout);
        void send(Object message);
        void deliver(Object message);
    }

    where:

    1. The ask method is a synchronous request-response method, requiring the receiving end to return a response synchronously;
    2. The send method is a synchronous request method, which is only responsible for sending the request to the receiving end synchronously, and does not require a reply;
    3. The deliver method is an asynchronous request method; as long as the sending-end process does not exit abnormally, the request will later be sent to the receiving end through another thread.
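    As a hedged sketch of how a caller might use this API (Sender.getSender, the service name and the String payloads are illustrative assumptions, not confirmed signatures):

    import java.time.Duration;

    // Hedged usage sketch for the Sender pseudo code above.
    public class SenderUsage {
        public static void main(String[] args) {
            // obtain a Sender by microservice name (or name + ip:port for an instance)
            Sender sender = Sender.getSender("linkis-resource-manager");

            // ask: synchronous request-response, blocks until the Receiver replies
            Object reply = sender.ask("RequestResource:user-a", Duration.ofSeconds(5));
            System.out.println(reply);

            // send: synchronous fire-and-forget, no reply expected
            sender.send("Heartbeat:user-a");

            // deliver: asynchronous, another thread delivers it later
            sender.deliver("LogBatch:user-a");
        }
    }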

    2) As the sender, the user sends a request to the receiver through the request methods provided by the Sender.

    3) The Sender sends the user's request to the interceptors, which intercept the request and perform additional processing on it:

    a) Broadcast interceptor. The broadcast interceptor only takes effect for requests that need to be broadcast. It provides a special broadcast interface: if a request implements the broadcast interface and is not already being broadcast, the broadcast interceptor decides that the request needs to be broadcast and triggers the broadcast operation. The specific steps are: obtain all microservice instances to which the request should be broadcast (if the list is empty, broadcast to all instances of the microservice by default); mark the request as being broadcast and call step 1) to obtain the corresponding Senders, which all start sending the request in a multi-threaded manner; when all Senders in the thread pool have finished sending, the broadcast request is marked as successful and returned to the user, completing the processing.

    b) Retry interceptor. The retry interceptor provides a retry capability for all subsequent steps. If the sender successfully sends the request but the receiver returns an exception requiring a retry, the retry interceptor is triggered and automatically resubmits the request; if the request does not specify a particular receiver instance and a ConnectException (connection exception) occurs while sending, it actively retries; and if the user has marked certain exceptions as retryable, the retry interceptor automatically retries on those as well.

    c) Cache interceptor. The cache interceptor is intended for synchronous requests whose response content is unlikely to change frequently. It provides a special cache interface: if a request implements the cache interface, the interceptor first checks whether a response from the receiving end has already been cached for this request; if so, the cached response is returned directly, otherwise processing continues with the following steps, and once the receiving end returns a response, the response is first cached and then returned to the user, completing the processing.

    d) Default interceptor. The default interceptor invokes the next processing steps.

    e) Custom interceptors. Users can also implement their own interceptors to achieve specific functions.

    4) The request encoder first converts the user's request data (an entity bean) into a serialized JSON string, then passes it to the Feign client generator.

    5) The Feign client generator generates a Feign client that can access the Restful request receiver on the receiving end.

    6) The generated Feign client calls the service discovery manager to obtain the list of all microservices. If the user specified only a microservice name in step 1), a suitable receiver microservice instance is selected for request forwarding through Feign's load-balancing strategy; otherwise the service selector rewrites Spring Cloud Feign's FeignLoadBalancer (Feign load balancer) so that, when the LoadBalancerCommand is created, the chosen instance is the microservice instance specified when the Sender was obtained in step 1).

    7) The Feign client is called to send the request to the Restful request receiver on the receiving end.

    3.2 Receiver#

    The receiving end requires users to implement the Receiver interface for processing real business logic.

    1) The Restful request receiver, an embedded HTTP Web Service inside Linkis RPC, is responsible for receiving requests from the sender;

    2) After the Restful request receiver receives a request, it first calls the request decoder to decode the request, parsing out the actual request information and the sender's microservice information (microservice name + instance IP and port); if parsing fails, it directly responds that the request could not be parsed.

    3) Put the parsed request information and sender microservice information into the request message queue;

    4) The request consumer will consume the decoded sender request in the request message queue.

    It obtains a suitable Receiver through the Receiver manager and, at the same time, uses the Sender generator to build a Sender pointing back to the sender from the parsed sender microservice information. The request consumer then hands the actual request information and the generated Sender to the Receiver for processing;

    5) Receiver, as the actual processing unit of user requests, requires users to implement the Receiver interface to complete the actual processing logic of the caller request.

    The pseudo code of Receiver is as follows:

    public interface Receiver {
        void receive(Object message, Sender sender);
        Object receiveAndReply(Object message, Sender sender);
        Object receiveAndReply(Object message, Duration duration, Sender sender);
    }

    Receiver provides methods to handle synchronous and asynchronous requests.

    6) If the request is asynchronous, the Receiver's receive method is called, and the upper-layer business logic decides whether to send a response back through the sender's Sender.

    7) If the request is synchronous, the Receiver's receiveAndReply method is called; its return value is used as the response result and sent back to the sender.
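    A minimal hedged sketch of a user-implemented Receiver against the pseudo interface above; the echo logic and payloads are illustrative assumptions:

    import java.time.Duration;

    // Hedged sketch of a user-implemented Receiver.
    public class EchoReceiver implements Receiver {
        @Override
        public void receive(Object message, Sender sender) {
            // asynchronous request: the business layer decides whether to reply
            System.out.println("got async message: " + message);
            sender.send("ack: " + message);   // optional reply via the caller's Sender
        }

        @Override
        public Object receiveAndReply(Object message, Sender sender) {
            // synchronous request: the return value is sent back as the response
            return "echo: " + message;
        }

        @Override
        public Object receiveAndReply(Object message, Duration duration, Sender sender) {
            return "echo within " + duration + ": " + message;
        }
    }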

    Version: 0.11.0

    Scheduler Architecture

    If it is within the allowable range, the grouping object obtains the corresponding consumer through the consumer manager and, while setting the target parameter to the target value, sets the other parameters to the matched values for that group.

    b) The parameter limit must meet a certain ratio.

    The grouped object gets the corresponding consumer through the consumer manager, and while setting the target parameter to the target value, the other parameters are also calculated in proportion to the corresponding target value, and all are reset.

    In addition to manually setting parameters, each consumer has an independent monitoring thread to count the length of the waiting queue in the consumer, the number of events being executed, and the growth rate of execution time.

    In each grouping object, thresholds and alarm ratios are set for these indicators. Once an indicator exceeds its threshold, or the ratio between multiple indicators exceeds a limited range (for example, when the average execution time is observed to be greater than the distribution interval parameter, the threshold is considered exceeded), the monitoring thread will immediately expand the consumer accordingly.

    When expanding, it will make full use of the above-mentioned parameter adjustment process to increase a certain parameter in a targeted manner, and other parameters will be automatically expanded accordingly.

    Version: 0.11.0

    Overview

    1 The original intention of Linkis#

    Almost every component of the big data open source ecosystem has its own set of user rights management, resource management, metadata management, independent API access and usage methods.

    And various new components continue to appear.

    However, the user's business needs usually require the collaborative processing of multiple open source components to achieve.

    For a business requirement, users need to learn the manuals of multiple products, and need to do repeated customized development on multiple products, in order to truly introduce open source components into the actual production environment.

    This has brought extremely high learning costs and extra workload to users, and a large amount of repeated maintenance and management work is also required for operation and maintenance.

    At the same time, the coupling between the upper-level functional tool products and the underlying computing storage system is too high, and the hierarchical structure and calling relationship are not clear and decoupled. As a result, any changes in the underlying environment will directly affect the normal use of business products.

    The original intention of Linkis is to provide a unified data middleware that docks with upper-level application tools and shields the various underlying calls and usage details, so that business users only need to pay attention to their business logic, unaffected even by a machine-room expansion or a complete relocation of the underlying big data platform!

    2 Linkis Technical Architecture#

    Technical Architecture

    As shown in the figure above, we have built multiple microservice clusters based on the current popular SpringCloud microservice technology to provide high availability capabilities.

    Each microservice cluster bears part of the system's functional responsibilities, and we have made the following clear division:

    -Unified Job Execution Service: A distributed REST/WebSocket service for receiving various script requests submitted by users.

    Currently supported computing engines are: Spark, Python, TiSpark, Hive, Shell, etc.

    Supported scripting languages are: SparkSQL, Spark Scala, Pyspark, R, Python, HQL and Shell, etc.;

    For more information about unified job execution services, please check UJES Architecture Design Document

    -Resource Management Service: Support real-time management and control of the resource usage of each system and user, limit the resource usage and concurrency of the system and users, and provide real-time resource dynamic charts to facilitate viewing and managing the system and users resource;

    Currently supported resource types: Yarn queue resources, servers (CPU and memory), number of concurrent users, etc.

    For more information about resource management services, please check RM Architecture Design Document

    -~~Application management service (not available in the open source version): Manages all user applications of all systems, including offline batch applications, interactive query applications and real-time streaming applications; provides powerful reuse capabilities for offline and interactive applications, application lifecycle management, and automatic release of users' redundant idle applications;~~

    -Unified storage service: Universal IO architecture, which can quickly connect to various storage systems, provide a unified call entry, support all commonly used format data, high integration, and easy to use;

    For more information on unified storage services, please check [Storage Architecture Design Document]

    -Unified Context Service: Unified user and system resource files (JAR, ZIP, Properties, etc.), unified management of parameters and variables of users, systems, and calculation engines, one setting and automatic reference everywhere;

    -Material Library: System and user-level material management, which can be shared and transferred, and supports automatic management of the entire life cycle;

    -Metadata Service: Real-time display of database table structure and partition status.

    These microservices cooperate with one another to provide a centralized, unified big data platform service externally.

    Through the construction of these services, we have improved the external service methods and processes of the entire big data platform.

    3 Linkis Business Architecture#

    Business Architecture

    Glossary:

    1) Gateway:

    Based on Spring Cloud Gateway, the plug-in function is enhanced, and a gateway service with WebSocket one-to-many capability is added, which is mainly used to parse and route user requests to designated microservices.

    2) Unified entrance:

    The unified entrance is the lifecycle manager for a user's jobs of a given engine type.

    From job generation to submission to the execution engine, to job information feedback to users and job closure, Entrance manages the entire life cycle of a job.

    3) Engine Manager:

    The engine manager is responsible for managing the entire life cycle of the engine.

    Responsible for applying for and locking resources from the resource management service, instantiating a new engine, and monitoring the life state of the engine.

    4) Execution engine:

    The execution engine is a microservice that actually executes user jobs, and it is started by the engine manager.

    To improve interaction performance, the engine service interacts directly with the unified entrance that submitted the job to it, executes the job correctly, and feeds back the information the user needs, such as logs, progress, status and result sets.

    5) Resource Management Service

    Real-time control of the resource usage of each system and each user, manage the resource usage and actual load of the microservice cluster, and limit the resource usage and concurrency of the system and users.

    6) Eureka

    Eureka is a service discovery framework developed by Netflix. Spring Cloud integrates it into its sub-project spring-cloud-netflix to realize the service discovery function of Spring Cloud.

    Each microservice has a built-in Eureka Client, which can access Eureka Server and obtain the ability of service discovery in real time.

    4 Linkis processing flow#

    Now let's walk through how a user of an upper-layer system submits a SQL statement, and how Linkis executes it and returns the result.

    Process sequence diagram

    1. A user of the upper-layer system submits a SQL statement; it first passes through the Gateway, which parses the user request and routes it to the appropriate unified entrance (Entrance)

    2. Entrance first checks whether an available Spark engine service already exists for that user of that system; if so, it submits the request directly to that Spark engine service

    3. If no Spark engine service is available, Entrance uses Eureka's service registration and discovery to obtain the list of all engine managers, and queries RM to get each engine manager's actual load in real time

    4. Entrance picks the engine manager with the lowest load and asks it to start a new Spark engine service

    5. When the engine manager receives the request, it asks RM whether this user is allowed to start a new engine

    6. If so, it requests and locks the resources; otherwise a startup-failure exception is returned to Entrance

    7. Once the resources are locked, the new Spark engine service is started; after a successful startup, the new Spark engine is returned to Entrance

    8. Having obtained the new engine, Entrance requests SQL execution from it

    9. The new Spark engine receives the SQL request, submits the SQL to Yarn for execution, and pushes logs, progress and status to Entrance in real time

    10. Entrance pushes the received logs, progress and status to the Gateway in real time

    11. The Gateway pushes the logs, progress and status back to the front end

    12. Once the SQL executes successfully, the Engine proactively pushes the result set to Entrance, and Entrance notifies the front end to fetch the result.

    For the design plan under abnormal Entrance/EngineManager/Engine, please refer to UJES Architecture Design Document

    Version: 0.11.0

    RM design

  • The Java class of each resource type (a subclass of the Resource class), and the corresponding JSON serialization method.

  • The Java class (subclass of ResultResource class) of all resource allocation results, and the corresponding json serialization method.

  • The encapsulated RM interface (resource registration, offline, application, available resources and resource release requests).

    After an interface call, the client generates the corresponding RPC command and passes it through the Sender to one of RM's microservice instances for processing. After RM finishes processing, the result is likewise returned to the client via RPC.

    7 Multi-instance state synchronization#

    Because RM is a key underlying service, to prevent an abnormality in one RM instance from affecting resource allocation for all services, multiple RM instances must be in service simultaneously, and the results must be consistent no matter which instance processes a given request.

    Requests to RM must go through the gateway service's forwarding and cannot target a fixed RM instance directly. Through the service registration and discovery mechanism, the gateway identifies the RM instances that are serving normally and forwards the RPC request to one of them, ensuring that every request is handled by an RM instance in a normal state.

    All RM resource records are stored in the same database, and RM instances keep no state of their own. When RM processes a request, any state change first acquires a lock, reads the current state from the database, writes the updated state back to the database immediately after the processing logic completes, and then releases the lock. This ensures that multiple RMs processing requests concurrently always work from the latest state.
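    A hedged sketch of this lock / read / update / release pattern; LockService and ResourceDao are hypothetical helper interfaces standing in for RM's actual lock and database access:

    // Hedged sketch of the pattern described above.
    public class ResourceStateUpdater {
        private final LockService locks;   // e.g. a database or distributed lock
        private final ResourceDao dao;     // reads/writes the shared RM database

        public ResourceStateUpdater(LockService locks, ResourceDao dao) {
            this.locks = locks;
            this.dao = dao;
        }

        public void allocate(String user, int amount) {
            locks.lock(user);                                 // 1. lock before any state change
            try {
                int available = dao.readAvailable(user);      // 2. read the latest state
                if (available >= amount) {
                    dao.writeAvailable(user, available - amount); // 3. write back at once
                }
            } finally {
                locks.unlock(user);                           // 4. release so other RMs proceed
            }
        }
    }

    interface LockService { void lock(String key); void unlock(String key); }
    interface ResourceDao { int readAvailable(String user); void writeAvailable(String user, int v); }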

    Version: 0.11.0

    Docking Multiple File Systems

    Users can connect to different file systems by implementing the File System interface, which makes expansion extremely convenient.

    Version: 0.11.0

    Access Remote File System Architecture

    1 Background#

    Normally, a JVM process started by a user can only read and write the files that user has access to.

    For example: User A starts a JVM process on a Linux server. If the user is not root (the superuser), the process can only access local files on that server and only has permission to operate on User A's files.

    But in many scenarios a JVM process launched as User A needs to access other users' files on the local filesystem, without proliferating file permissions.

    At the same time, how can the HDFS file system be accessed without HDFS installed locally?

    And how can we access a user's files on HDFS without having to create a Linux user for them?

    2 Ideas#

    By launching a filesystem engine (IO-Engine) via the engine manager on the remote server (what is EngineManager?) and providing a compatible client API, users are given access to remote file systems.

    The overall architecture is shown in the figure below:

    Storage Remote Mode Architecture

    3 Implementation#

    (1) User A calls the remote filesystem client (IO-Client), passing in the file path (FsPath) and the proxy user B;

    (2) The IO-Client receives the FsPath and proxy user B, and constructs a ProxyFS for proxy user B;

    (3) User A operates on proxy user B's files through the ProxyFS. If the permission checks in the subsequent steps pass, actions such as creating, deleting, reading and writing files can be performed;

    (4) User A's ProxyFS operations are passed to the IO-Client and transmitted over the network to the remote filesystem service; during transmission, the Smart Routing Service (IR) selects a remote file service (IO-Engine) with lower load;

    (5) When the remote file service (IO-Engine) receives an operation from the IO-Client, security rules determine first whether the transmitted token is legal, then whether the IP is legal, and then whether User A is authorized to operate on User B's files;

    (6) The IO-Engine then accesses the actual filesystem through the superuser's Fs and operates on User B's files. Since the IO-Engine service is started by a superuser, it can access and operate on all users' files;

    (7) The IO-Engine completes the operation on User B's files and returns the result to the IO-Client, which returns it to User A, completing the whole proxied remote file access process.

    Note#

    The engine manager (IO-EM) service in the figure above is responsible for starting and stopping IO-Engine services.

    The Smart Routing Service (IR) in the figure above is responsible for gauging the load of each IO-Engine and forwarding IO-Client requests in a load-balanced way; it notifies IO-EM to start new IO-Engine services when all IO-Engines are overloaded, and to stop idle IO-Engine services when load is low.

    From the process above, at least the following can be concluded:

    From point (5), full permission control can be achieved, and users can configure their own security rules;

    Features similar to shared storage can be achieved, since all access goes through the remote filesystem service;

    Multiple filesystems can be supported, per points (1) and (2), by passing in different types of Fs.

    Version: 0.11.0

    ResultSet File Storage

    Result set file storage scheme-Dolphin file

    1 Background#

    Linkis faces the need to store multiple types of data in files, such as storing Hive table data in files, and hopes to save metadata information such as field types, column names, and comments.

    Existing file storage solutions generally only support specific data types for storage. For example, ORC supports data tables but does not support the storage of unstructured data.

    At the same time, support for saving special characters is also the reason that prompted us to define a new file format. For example, if there are special characters such as line breaks in a field in textFile, the content will be abnormal when it is parsed and read.

    Finally, if the file content is very large, Linkis usually wants to provide pagination. Existing file storage schemes only support skipping a number of bytes, not skipping a number of lines or reading only a particular row of the file.

    2 Ideas#

    Linkis defines a file storage format Dolphin file that stores multiple data types.

    Dolphin file format

    The file structure of Dolphin is shown in the figure above:

    • The Dolphin logo is stored at the beginning of the file to distinguish whether the file is a Dolphin file

    • Metadata: content metadata information

    • IndexData: row-length index

    • RowData: Row data.

      RowData stores a row of data, such as the data of a row of the table, including the length of the row data and the Byte information of the row data.

    • PostData: Basic file information

    • PostDataLen: Basic information length

    Among them, PostData is the basic information of the file mainly composed of:

    • type: the type of storage content

    • Codec: encoding format

    • Statistical information: The statistical information of the file content includes the number of lines, the maximum and minimum values, etc.

    3 Implementation#

    The specific process of reading and writing Dolphin files is as follows:

    Dolphin file read and write flow chart

    3.1 Write data to Dolphin#

    When the user needs to store the contents of a file (for example: table) in a Dolphin file, the steps are as follows:

    1. Write Dolphin file ID

    2. Write data type Type

    3. Through the serializer (Serializer), write Metadata (metadata) such as the column name of the table, the type of each column, column comments, etc.;

    4. Pass in a row of data to DolphinWriter, DolphinWriter serializes the row of data through a serializer (Serializer) to obtain the row length and serialized Bytes to write to the Dolphin file;

    5. After each row is written, the statistical information is updated: the row count is incremented, the maximum and minimum values of each column are updated, etc.;

    6. DolphinWriter writes PostData (basic information) composed of statistical information and encoding information to the Dolphin file;

    7. Write the length of PostData to complete the write operation.
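    A hedged sketch of the write sequence above; the magic string, field layout and serialization details are illustrative assumptions, not the real Dolphin binary format:

    import java.io.DataOutputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.util.List;

    // Hedged sketch of the 3.1 write steps.
    public class DolphinWriterSketch {
        public static void write(String path, byte[] metadata,
                                 List<byte[]> rows, byte[] postData) throws IOException {
            try (DataOutputStream out = new DataOutputStream(new FileOutputStream(path))) {
                out.write("DOLPHIN".getBytes(StandardCharsets.UTF_8)); // 1. file ID
                out.writeInt(1);                                       // 2. data type Type
                out.writeInt(metadata.length);                         // 3. Metadata
                out.write(metadata);
                for (byte[] row : rows) {                              // 4. row by row:
                    out.writeInt(row.length);                          //    row-length index
                    out.write(row);                                    //    serialized row bytes
                }
                // 5. statistics would be updated per row while writing (omitted here)
                out.write(postData);                                   // 6. PostData
                out.writeInt(postData.length);                         // 7. PostData length last
            }
        }
    }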

    3.2 Read Dolphin file#

    The steps for users to read the contents of the Dolphin file are as follows:

    1. Read the Dolphin file ID, and throw an exception if it is not a Dolphin file;

    2. If the user only needs to read Statistical information, read the length of PostData, and obtain PostData according to the length.

      Through PostData, the basic information is parsed into corresponding Type, Codec, MetaData, and Statistical information.

      Return to complete this reading operation.

    3. If the user wants to read data, first read the data type Type.

    4. Read the Metadata information, get the Deserializer through Type, and encapsulate the read Bytes data into MetaData

    5. Read the row length index, and read the row Bytes through the row length index. Obtain Deserializer through Type, convert Bytes into Record data, and encapsulate RowData with Record and MetaData;

    6. The read RowData row content is given to the user to complete the entire reading.

    3.3 Skip#

    Question: How can a single row be read, or reading start from a specific line?

    Answer: When a row is written, the row-length index is written first, so when reading, the user can read the index and skip rows via the row-length index;
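    A hedged sketch of skipping rows via the row-length index, under the same layout assumptions as the writer sketch above:

    import java.io.DataInputStream;
    import java.io.IOException;

    // Hedged sketch: skip the first n rows using the per-row length index.
    public class DolphinSkipSketch {
        static void skipRows(DataInputStream in, int n) throws IOException {
            for (int i = 0; i < n; i++) {
                int rowLength = in.readInt();       // read the row-length index
                in.readFully(new byte[rowLength]);  // consume the row bytes without parsing
            }
        }
    }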

    3.4 Serialization#

    The serializer (Serializer) serializes data into a byte array, and the deserializer (Deserializer) parses the byte array back into string data, so that special characters are read and written correctly;

    Serializer and Deserializer are related to Type. Different data types can define different Serializer and Deserializer.

    Dolphin provides a common interface for user-defined implementations to support other types of files.

    Version: 0.11.0

    Asynchronous Pool Call

    How UJES implements full asynchronous thread pool calls

    1 Advantages of the Full Asynchronous Thread Pool#

    • Five asynchronous message queues and thread pools

    • Each Job occupies a thread for less than 1 ms

    • Each Entrance can accept more than 10,000 TPS of resident Jobs

    2 How to Implement#

    Full-asynchronous call thread pool

    • How to improve the throughput of upper-layer requests?

    The Entrance WebSocket processor has an internal processing thread pool and request queue to receive upper-layer requests routed from Spring Cloud Gateway.

    • How to ensure that different users of different systems are isolated from one another?

    In the Entrance JobScheduler, each user of each system has a dedicated thread, ensuring isolation.

    • How to ensure job execution?

    The Job execution pool is used only for submitting Jobs; once a Job is submitted to the Engine, it is immediately placed into the Job execution queue, ensuring that each Job occupies an execution-pool thread for no more than 1 millisecond.

    The RPC request pool receives and processes logs, progress, status and result sets from the Engine, updating the Job's information in real time.

    • How can a Job's logs, progress and status be pushed to the upper-layer system in real time?

    The WebSocket send pool is dedicated to processing Job logs, progress and status, and pushing this information to the upper-layer system.

    Version: 0.11.0

    Spark Engine File Import Export

    1 Background#

    Data analysts or data warehouse engineers often need to export data from databases to Excel files for data analysis, or to share the exported Excel with users or partners.

    Furthermore, users often need to analyze data files such as CSV and Excel jointly with online Hive tables, which requires importing the files into the Hive database.

    In more confidential industries such as banking, exports often require desensitizing sensitive fields such as ID-card and mobile-phone numbers.

    2 Ideas#

    Use Spark's distributed computing capability and its DataSource support, which connects to multiple data sources.

    3 Implementation#

    3.1 Export#

    The export process is shown in the figure below:

    Export process

    1. The user selects the data source and the table to be exported, such as the user order table in the MySQL library;

    2. The user defines the query statement for the data to be exported, as well as transformations for specified columns.

      For example: export the order table for the last six months and desensitize the user information;

    3. The user selects the export file format and output path, e.g. export the user order table to Excel at /home/username/orders.xlsx;

    4. Spark reads the corresponding data according to the configured data source, table and query statement. DataSource supports multiple storage components such as Hive, MySQL, Oracle, HDFS, HBase and MongoDB;

    5. The data is then transformed into a DataFrame according to the conversion format configured by the user;

    6. A file write object (Writer) is obtained according to the configured file format, e.g. Spark's Excel writer; the Writer supports multiple file formats such as Excel, CSV and JSON;

    7. The Writer writes the data to the destination, e.g. /home/username/orders.xlsx.
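    A hedged sketch of this export flow using Spark's DataSource API; the JDBC URL, query, credentials and the third-party Excel data source are illustrative assumptions:

    import org.apache.spark.sql.Dataset;
    import org.apache.spark.sql.Row;
    import org.apache.spark.sql.SparkSession;

    // Hedged sketch of steps 4-7 of the export flow above.
    public class ExportSketch {
        public static void main(String[] args) {
            SparkSession spark = SparkSession.builder().appName("export").getOrCreate();

            Dataset<Row> orders = spark.read()
                    .format("jdbc")                                   // 4. read via DataSource
                    .option("url", "jdbc:mysql://db-host:3306/shop")  // hypothetical source
                    .option("dbtable", "(SELECT * FROM orders WHERE dt >= '2020-01') t")
                    .option("user", "reader").option("password", "***")
                    .load();

            Dataset<Row> masked = orders.drop("phone_number");        // 5. desensitize columns

            masked.write()
                    .format("com.crealytics.spark.excel")             // 6. third-party Excel writer
                    .option("header", "true")
                    .save("/home/username/orders.xlsx");              // 7. write destination
        }
    }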

    3.2 Import#

    The import process is shown below:

    Import process

    1. The user selects the file to import; the file Reader reads from the given file, e.g. /home/username/orders.xlsx;

    2. The Reader reads the first N lines (for example 10) for data-type inference; the Reader supports multiple file formats;

    3. The data-type inferrer uses the first 10 lines to determine each column's type: a type is inferred for the value in each row, and the type that appears most often is finally chosen and returned to the user (see the sketch after this list).

      e.g. user: String, orderId: Int;

    4. The user selects the data source to import into, e.g. MySQL; importing also supports multiple selections;

    5. The user chooses whether to create a new table, overwrite the data, or append the data, e.g. select the user order table and choose appending;

    6. The user defines the import transformation format and the imported column information, e.g. decrypting the user information;

    7. Spark transforms the file into a DataFrame according to the user-supplied transformation and column information;

    8. The corresponding DataSource is generated from the data source selected by the user;

    9. The processed DataFrame is imported via the DataSource into the target, e.g. the MySQL library.
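    The hedged sketch below illustrates step 3's majority-vote type inference; the candidate type set and parsing rules are illustrative assumptions:

    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Hedged sketch: infer a type per cell, then pick the most frequent type per column.
    public class TypeInferenceSketch {
        static String inferCell(String v) {
            try { Long.parseLong(v); return "Int"; } catch (NumberFormatException ignored) {}
            try { Double.parseDouble(v); return "Double"; } catch (NumberFormatException ignored) {}
            return "String";
        }

        // rows: the first N parsed lines; returns one type name per column
        static String[] inferColumns(List<String[]> rows, int columns) {
            String[] result = new String[columns];
            for (int c = 0; c < columns; c++) {
                Map<String, Integer> votes = new HashMap<>();
                for (String[] row : rows) {
                    votes.merge(inferCell(row[c]), 1, Integer::sum);  // count per-cell votes
                }
                result[c] = votes.entrySet().stream()
                        .max(Map.Entry.comparingByValue())            // majority wins
                        .map(Map.Entry::getKey).orElse("String");
            }
            return result;
        }
    }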

    Version: 0.11.0

    UJES Design

    1 Document Overview#

    1.1 Project background#

    UJES (Unified Job Execution Service) is one of Linkis's core components. The project provides a new general-purpose framework for the big data ecosystem, implemented as microservices, and addresses some of the pain points of existing open source projects of the same kind on the market.

    This document is intended for readers with some working experience in, or interest in learning about, big data platforms, in particular the Hadoop ecosystem.

    1.2 Interpretation of terms#

    This section explains the terminology used for certain items later in the document.

    1) Gateway:

    UJES's gateway introduces plugin enhancements based on Spring Cloud Gateway and adds WebSocket one-to-many capability to the gateway service, mainly for forwarding user requests to the specified microservices.

    2) Access microservices:

    UJES's Entrance microservice is the lifecycle manager for a user's jobs of a given type. From job generation to submission to the execution engine, to feeding results back to the user and closing the job, the Entrance microservice manages a job's entire life cycle.

    3) Engine manager:

    UJES's Engine Manager is a microservice that handles engine start-up requests and also monitors the engine's life state.

    4) Execution Engine:

    UJES's execution engine is the microservice that actually executes user jobs. It is launched by the Engine Manager, interacts with the Entrance that submits jobs to it, executes jobs correctly, and feeds back the information users need.

    5) Application manager:

    UJES's application management microservice maintains the execution-engine instance information in the cluster; the Entrance microservice relies on this information to obtain an available execution engine.

    2 Overall architecture#

    The goal of the UJES project is to connect users and data correctly and securely, and to provide users with powerful yet easy data job submission.

    UJES is positioned as a bridge between the upper application system and the lower computing storage engine.

    Users only need to submit their big data jobs to UJES; UJES submits them to the underlying computing and storage engines for execution, and the job's logs, status, progress and results are returned to the user in real time.

    The overall structure of UJES is shown in figure 2.1.

    UJES Overall Architecture

    As the figure above shows, the UJES framework sits between the upper-layer application systems and the underlying computing systems. It plays the role of managing user jobs, encapsulates the storage, computing and other functions of the big data cluster, and provides a unified job submission framework. Users no longer need to distinguish engine types such as Spark or Hive; by submitting only to UJES they can put the cluster at their service, saving significant learning costs.

    3 Logical architecture#

    UJES's logical architecture is designed on the prevailing microservice architecture model.

    The microservice style divides the backend into a small set of services that coordinate with and reinforce one another.

    Lightweight communication mechanisms are used between microservices (usually an HTTP-based RESTful API).

    This architecture model has the advantages of clear logic, simple deployment, easy extension, technology heterogeneity and high reliability.

    The logical structure of UJES is shown in figure 3.1.

    UJES Logical Architecture

    3.1 UJES Operational Main Process#

    A complete example is used to describe the main operating process of UJES; the functionality of each microservice component in the diagram is described in more detail after the main process.

    • 1. User submits a job, gateway forwards

      User A submits a big data job to UJES's gateway microservice, e.g. via RESTful or WebSocket. The gateway forwards the request to the Entrance microservice for the given job type; if the user submits spark-sql code, the gateway submits the job to Spark's Entrance microservice.

      Since the Entrance microservice can have multiple deployed instances, the gateway forwards to a suitable instance according to a load-balancing strategy.

    • 2. Entrance parses and checks

      Once the user's job is forwarded to Spark's Entrance microservice, the parser in the Entrance resolves the submitted job into a runnable task, and the persistence module persists the task into the database.

      Preset interceptors also perform custom variable replacement, malicious code checks, etc. on the script.

      If a user's request is intercepted, the code will not be submitted to the cluster for execution.

    • 3. Listener setup

      Information generated while the task runs must be processed as soon as it is produced, e.g. displayed to the user or persisted to the database. This generally requires an event bus and listeners, so various types of listeners are set up for the task.

    • 4. Task enters the scheduler

      Once the task is generated, it enters the scheduler to wait to be scheduled.

      The core concept in the Scheduler is the consumer queue, identified by a consumer group. The group is usually identified by both the user's system and the user; for example, if user Anna submits a task to UJES from the system Test, the consumer group can be labeled Test_Anna.

      After a task enters the scheduler, it is placed into the consumer queue matching its group identifier, waiting to be scheduled.

      Consumption of a queue is generally implemented with a single thread.

    • 5. Application management microservice: providing an execution engine

      Once the task is scheduled, the Entrance microservice applies to the application management microservice for an execution engine.

      Based on the user's consumer-group information, the application management microservice checks whether there are engines in the cluster the user can use. If an execution engine is available to the consumer group, the engine's information is returned to the Entrance microservice, which submits the task to that execution engine.

      If the application management microservice finds no engine in the cluster the group can use, it requests a new execution engine from the Engine Manager microservice.

      The Entrance microservice's consumption thread waits until the application management microservice returns engine information indicating success, failure, or timeout.

    • 6. Engine Manager microservice: starting and managing engines

      The Engine Manager microservice starts and manages execution engines.

      When the Engine Manager receives a request from the application management microservice to launch a new engine, the request carries the user's consumer-group information. The Engine Manager applies for resources from the resource manager based on this information; if the user still has sufficient resources, the resource manager allows the Engine Manager to start a new engine for the user, and the new engine is broadcast to the application management microservice.

    • 7. Entrance microservice submits the task to the execution engine

      After step 6, the application management microservice has acquired the newly launched engine's information and returns it to the Entrance microservice, which then submits the task to that engine.

    • 8. Entrance interaction with the engine

      Once the task is submitted to the execution engine, it runs and produces logs, progress and result information, which are returned to the Entrance microservice via RPC. The returned information carries the task's unique identifier, so the Entrance microservice can process it correctly.

    • 9. Task completion

      Once the task finishes on the execution engine, its success or failure status is returned to the Entrance microservice. After the Entrance microservice updates the task status, the consumer queue continues consuming the tasks in the queue.

    3.2 Details of the architecture and optimization#

    In addition to the main process described in section 3.1, UJES has its own designs for cluster management and performance enhancement.

    • 1. Task classification and diverse consumption patterns

      Tasks can be categorized by their characteristics: brand-new tasks, retried tasks, duplicate tasks, etc.

      A new task is one newly submitted by the user; a retried task is one that failed and needs to be retried under certain circumstances; a duplicate task is one consistent with a previous submission.

      After a task enters the scheduler's consumer queue, a new task goes to the FIFO consumer for consumption; a duplicate task goes to the ReUse consumer, which does far less work than FIFO and returns the results of the previous task to the user.

    • 2. Control of engine concurrency

      In UJES, the number of engines a user can start is controlled; e.g. a user can start at most three Spark engines. This concurrency control is ensured jointly by the microservices and the resource management microservice.

      The Entrance microservice keeps at most three active tasks per user, so at most three engines are used. The resource management microservice also guarantees this: if a user tries to launch a fourth engine, the Engine Manager must request resources from the resource management microservice, which refuses on the grounds that the engine count exceeds the limit, so the fourth engine fails to start.

    • 3. Execution engine heartbeats and unhealthy engines

      After acquiring an engine's information, the application management microservice maintains a heartbeat with the engine to ensure the engine process is still alive.

      If an engine misses heartbeats for a period of time, it is added to the unhealthy engine list and will not be used when engines are requested.

    • 4. Natural expiry of engines and active killing by users

      Running engines occupy cluster resources, particularly Spark engines, which take more queue resources. If the Engine Manager detects that an execution engine has been unused for a long time, it kills the engine to free cluster resources, and broadcasts to the application manager after the engine has been correctly killed.

      Users may also want to kill their engines actively while using UJES: the user submits a request to the gateway, the gateway forwards it to the Engine Manager, and the Engine Manager kills the engine.

    • 5. Multi-tenant isolation

      Multi-tenant isolation is an important capability of a big data platform, and UJES supports it architecturally in conjunction with the Hadoop ecosystem components.

      User jobs run on the execution engine, and when launching a new execution engine UJES switches to that user before executing the system commands, so the execution engine process runs with the user's own permissions, completely isolated from engines started by other users, realizing multi-tenant isolation.

    • 6. Smart diagnosis

      Smart diagnosis is a refinement module of UJES. Big data jobs often compute over large amounts of data, occupy substantial cluster resources, and run for a long time.

      Users always want feedback from the cluster, such as whether the data is skewed and whether queue resources are sufficient.

      Smart diagnosis is designed for this need; the diagnostic module can analyze the resources and data of a user's job while it runs and transmit the analysis to the user in real time.

    4 Interface Design#

    4.1 External Interface Design#

    UJES External Interface means interfaces with users and clusters.

    • 1 User Interface

    Users usually access UJES via RESTful and WebSocket interfaces.

    Users encapsulate their requests as JSON in the prescribed format and submit them via POST.

    It is recommended that users access UJES via WebSocket. The data exchange specification is given later in the document.

    • 2 Cluster interface

      UJES's interaction with clusters is determined by engine type.

      As shown in figure 2.1, UJES's implementation engine cuts across the UJES and cluster levels.

      As an example, the Spark execution engine interacts with clusters through the Driver API provided by Spark.

      When using the UJES framework, users can interface with clusters or other server resources according to their needs and characteristics.

    4.2 Framework interface design#

    UJES serves as a framework in which framework developers can access development according to their needs.

    Framework access is generally SDK-based: after introducing the UJES SDK via dependency management such as Maven or Gradle, developers implement the following interfaces.

    1) Access interface to entrance microservices

    2) Engine Manager Access Interface

    3) Engine Access Interface

    See the UJES access document for details.

    4.3 Internal Functional Module Interface Design#

    Interactions between UJES's internal functional modules use the Feign-based RPC method. For the Linkis RPC architecture, please click here.

    UJES's Entrance, EngineManager and Engine all communicate via Linkis RPC.

    In particular, for the interaction between Entrance and Engine: Entrance sends the user request via its Sender to the Engine's Receiver; the Engine's Receiver saves the sending end's Sender and submits the request for execution, then uses that Sender to push logs, progress and status back to Entrance as they become available.

    RPC Framework

    5 Deployment structure#

    5.1 Traditional modes of deployment#

    Please see the rapid deployment document.

    Version: 0.11.0

    WebSocket Request

    Gateway's multi-WebSocket request forward implementation

    1 Feature points#

    • 1-to-N support between a frontend client and backend WebSocket microservices

    • Full lifecycle management of WebSocket channels

    2 Zuul's Bug#

    Forwarding WebSocket requests is not supported at all.

    3 Spring Cloud Gateway Limitations#

    A WebSocket client can only be forwarded to one specific backend service; a single WebSocket client cannot reach multiple WebSocket microservices through the gateway API.

    Limitations of Spring Cloud Gateway

    4 Linkis Solution#

    Linkis implements a WebSocket router-forwarder in Spring Cloud Gateway, which establishes WebSocket connections with clients, automatically analyzes each client WebSocket request, and forwards the request by rule to the corresponding instance of the appropriate backend microservice.

    Linkis&#39;s Gateway Scheme

    The WebSocket router-forwarder connects client WebSocket requests upward and multiple backend WebSocket microservice instances downward. To forward client WebSocket requests based on rules, the architecture of the WebSocket router-forwarder is:

    WebSocket router schema

    4.1 WebSocket Receiver#

    1) The WebSocket receiver is a global filter of Spring Cloud Gateway. It accepts a client's WebSocket connection request and creates a one-to-one WebSocket channel for communication between the client and Spring Cloud Gateway.

    2) At the same time, it listens on the WebSocket channel for requests sent by the client, extracts the essential basic information (such as the request address, URI and user), wraps it in a simple envelope, and passes it to the rulers for processing.

    4.2 Rulers#

    1) The rulers receive a notification from the WebSocket receiver and start processing according to their rules.

    2) URL Ruler

    Linkis defines the client's request text frame (TextWebSocketFrame) as a JSON string of the following form:

    "{'method': '/api/v1/${service}/${uriPath}', 'data': '}"

    where:

    method is the actual request URI: the leading /api is fixed, marking an API request; v1 is the API version; service is the name of the requested service; uriPath is the rest of the request path.

    data is the actual request payload.

    The service information is obtained by parsing method and is passed on to step 4 (see the parsing sketch after this list).

    3) If the client's text frame (TextWebSocketFrame) does not conform to the URL ruler's standard format, or the URL ruler cannot parse the service information from it, the user-defined rulers are loaded to parse the service. If none of the custom rulers can parse the service information either, a parsing error is returned to the client directly; otherwise the service information is passed on to the next step.

    4) With the service information obtained in step 2 or step 3, the ruler fetches the list of all healthy microservices from the discovery service (e.g. Eureka), finds all instances of that microservice, selects the instance with the lowest load via a load-balancing strategy, and hands it to the WebSocket forwarder.
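    A minimal Scala sketch of the URL ruler's parsing rule (the regex and helper name are illustrative, not Linkis's actual implementation):

        // Extract ${service} from a method such as "/api/v1/${service}/${uriPath}".
        val MethodPattern = "/api/v1/([^/]+)/(.+)".r

        def parseService(method: String): Option[String] = method match {
          case MethodPattern(service, _) => Some(service)   // step 2 succeeded
          case _ => None                                    // fall through to custom rulers (step 3)
        }

        // parseService("/api/v1/entrance/execute") returns Some("entrance")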

    4.3 WebSocket transponder#

    The WebSocket forwarder is divided into a WebSocket manager and a WebSocket request forwarder.

    1) WebSocket Manager

    The WebSocket manager is responsible for managing the one-to-one WebSocket connection channels between clients and WebSocket receivers, and the one-to-many WebSocket connection channels between the WebSocket forwarder and backend microservice instances.

    If a client disconnects from the WebSocket receiver, the WebSocket manager immediately closes all of that client's related one-to-many WebSocket connections between the WebSocket forwarder and backend microservice instances;

    At the same time, to keep the WebSocket connections between the forwarder and backend microservice instances from being released for being idle, the WebSocket manager periodically sends a PingWebSocketFrame to each backend microservice instance.

    2) WebSocket Request Forwarder

    The WebSocket request forwarder obtains the microservice instance information from the ruler.

    Note the distinction between a service and its service instances: a microservice may have multiple instances, each providing exactly the same functionality.

    It then asks the WebSocket manager whether a WebSocket connection channel from the forwarder to this microservice instance already exists for the client. If it exists, that channel is used to forward the client's request text frame (TextWebSocketFrame); otherwise, a brand-new WebSocket connection is created between the forwarder and this microservice instance and bound to the client's one-to-one WebSocket connection with the WebSocket receiver, and the response is then pushed back to the client through the channel between the client and the WebSocket receiver.

    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html b/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html index 86e2c8849b8..edba410bd8e 100644 --- a/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html +++ b/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ Install EngineConnPlugin Engine | Apache Linkis - + @@ -18,7 +18,7 @@ sh linkis-daemon.sh restart linkis-engine-plugin-server
    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/production_deployment _guide/index.html b/docs/0.11.0/deployment/production_deployment _guide/index.html index 2a4dd73617c..a20736fe004 100644 --- a/docs/0.11.0/deployment/production_deployment _guide/index.html +++ b/docs/0.11.0/deployment/production_deployment _guide/index.html @@ -7,7 +7,7 @@ Production Deployment Reference Guide | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Production Deployment Reference Guide

    1 Introduction#

         Linkis has been running stably on the WeBank big data production platform for more than two years. Its development and operations personnel have summarized a set of Linkis production deployment guidelines that let Linkis deliver its best performance on the basis of stable operation, while also saving server resources and reducing usage costs. The guide covers two major categories: deployment plan selection and parameter configuration. Linkis has also been stress-tested in the test environment for a long time; we share our stress-test practice and experience in Chapter 4.

    2 Deployment plan selection#

         Linkis's stand-alone deployment is simple, but it cannot be used in a production environment, because running too many processes on the same server puts the server under too much pressure.

         The choice of deployment plan is related to the company's user scale, user habits, and the number of concurrent cluster users. Generally speaking, the choice is made based on the number of concurrent Linkis users and the users' preference for particular execution engines.

         The plans below are organized by the number of simultaneous users, assuming that users prefer Spark first and Hive second; server memory of 64G or more is recommended.

         On machines where an EngineManager is installed, the memory load will be relatively high because users' engine processes are started there; machines hosting other types of microservices carry a relatively low load.

         We generally recommend reserving about 20G on each server where an EM is installed, for the Linux system, the EM's own process and other processes. For example, on a 128G server, after reserving 20G there is still roughly 100G of memory available for starting engine processes; if each Spark Driver uses 4G of memory, that server can start up to 25 Spark engines.

    The formula for the total resources used by Linkis:

    Total resources used = total memory + total number of cores
                         = (number of concurrent users) * (Driver or Hive client memory)
                         + (number of concurrent users) * (Driver or Hive client cores)

    For example, with 50 concurrent users, a Spark Driver memory of 2G, a Hive client memory of 2G, and 2 cores per engine: 50 * 2G + 50 * 2 cores = 100G of memory + 100 CPU cores.
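    The same arithmetic as a small Scala sketch (numbers taken from the example above):

        val concurrentUsers   = 50
        val memoryPerEngineGB = 2   // Spark Driver or Hive client memory
        val coresPerEngine    = 2

        val totalMemoryGB = concurrentUsers * memoryPerEngineGB  // 100 GB
        val totalCores    = concurrentUsers * coresPerEngine     // 100 cores
        println(s"Linkis needs ${totalMemoryGB}G of memory and $totalCores CPU cores")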

    Convention before parameter configuration (must see):

    1. Parameters are generally configured in linkis.properties in the conf directory of each microservice's installation directory, in key=value form, e.g. wds.linkis.enginemanager.cores.max=20. The only exception is engine microservices, whose configuration goes in linkis-engine.properties.

    2. After changing parameters, the microservice must be restarted for them to take effect. For engine parameters, kill the engine on the engine manager page and restart it for the new configuration to take effect. A configuration example is sketched below.
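    For example, a hypothetical excerpt of linkis.properties on an EngineManager server (the values are only the suggestions from the tables below, not mandatory):

        wds.linkis.enginemanager.memory.max=40G
        wds.linkis.enginemanager.cores.max=20
        wds.linkis.enginemanager.engine.instances.max=20
        wds.linkis.enginemanager.protected.memory=2G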

    A reference deployment plan is provided below.

    2.1 The number of simultaneous users 10-50#

    1). The best recommendation for server configuration: 4 servers, named S1, S2, S3, S4

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1 | SparkEM needs an exclusive server, because it is assumed that users prefer Spark most (if Hive is preferred, this can be adjusted)
    SparkEntrance | S2 |
    HiveEngineManager | S3 |
    HiveEntrance | S2 |
    PythonEngineManager | S3 |
    PythonEntrance | S2 |
    Others (Eureka, Gateway, etc.) | S4 | If this machine is under too much pressure, another server can be added to deploy some services separately

    2). Minimum server configuration: 2 servers

    3). Parameter configuration

    Parameters are configured in linkis.properties and linkis-engine.properties in the conf directory under the microservice installation directory. Parameter configuration generally falls into two types: Entrance and EngineManager.

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 2000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 100

    b) EngineManager microservice

    Note: Linkis defines the concept of protected resources. Their purpose is to reserve a certain amount of resources so that the EM never uses up its configured maximum, thereby protecting the machine.

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (i.e. at most 38 (40-2) G of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (i.e. at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (i.e. at most 19 (20-1) engines can be started)

    2.2 Number of concurrent users 50-100#

    1). Recommended server configuration: 7 servers, named S1, S2...S7

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2 |
    SparkEntrance | S5 |
    HiveEngineManager | S3, S4 |
    HiveEntrance | S5 |
    PythonEngineManager | S4 |
    PythonEntrance | S4 |
    Eureka, Gateway, RM | S6 | Eureka and RM require high availability deployment
    PublicService, RM, Datasource, Eureka | S7 | Eureka and RM require high availability deployment

    2). Minimum server configuration: 4 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 3000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 120

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (i.e. at most 38 (40-2) G of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (i.e. at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (i.e. at most 19 (20-1) engines can be started)

    2.3 Number of simultaneous users 100-300#

    1). Recommended server configuration: 11 servers, named S1, S2...S11

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2, S3, S4 |
    SparkEntrance | S8 |
    HiveEngineManager | S5, S6, S7 |
    HiveEntrance | S8 |
    PythonEngineManager | S9 |
    PythonEntrance | S9 |
    Eureka, Gateway, RM | S10 | Eureka and RM require high availability deployment
    PublicService, RM, Datasource, Eureka | S11 | Eureka and RM require high availability deployment

    2). Minimum server configuration: 6 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 4000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 150

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (i.e. at most 38 (40-2) G of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (i.e. at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (i.e. at most 19 (20-1) engines can be started)

    2.4 Number of concurrent users 300-500#

    1). Recommended server configuration: 15 servers, named S1, S2...S15

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2, S3, S4, S5, S6, S7 |
    SparkEntrance | S12 |
    HiveEngineManager | S8, S9, S10, S11 |
    HiveEntrance | S12 |
    PythonEngineManager | S13 |
    PythonEntrance | S13 |
    Eureka, Gateway, RM | S14 | Eureka and RM require high availability deployment
    PublicService, RM, Datasource, Eureka | S15 | Eureka and RM require high availability deployment

    2). Minimum server configuration: 10 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.max | Queue size of RPC messages received by the Entrance microservice | 5000
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | Size of the Entrance microservice's RPC consumer thread pool | 150

    b) EngineManager microservice

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.enginemanager.memory.max | Total memory of all engines started by the EM process | 40G (64G server) or 100G (128G server)
    wds.linkis.enginemanager.cores.max | Total number of cores of all engines started by the EM process | 20
    wds.linkis.enginemanager.engine.instances.max | Total number of engines started by the EM process | 20
    wds.linkis.enginemanager.protected.memory | Memory reserved for protection by the EM process | 2G (i.e. at most 38 (40-2) G of memory can be used)
    wds.linkis.enginemanager.protected.cores.max | Number of cores reserved for protection by the EM process | 2 (i.e. at most 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instances | Number of engine instances reserved for protection by the EM process | 1 (i.e. at most 19 (20-1) engines can be started)

    2.5 The number of simultaneous users is more than 500#

    1). Recommended server configuration: 25 servers, named S1, S2...S25

    Service Name | Deployment Selection | Description
    SparkEngineManager | S1, S2, S3, S4, S5, S6, S7, S8, S9, S10 |
    SparkEntrance | S17 |
    HiveEngineManager | S11, S12, S13, S14, S15, S16 |
    HiveEntrance | S17 |
    PythonEngineManager | S18, S19 |
    PythonEntrance | S20 |
    Eureka, RM | S21 | Eureka and RM require high availability deployment
    RM, Eureka | S22 | Eureka and RM require high availability deployment
    Eureka, PublicService | S23 | Eureka and RM require high availability deployment
    Gateway, Datasource | S24 |

    2). Minimum server configuration: 15 servers

    3). Parameter configuration

    a) Entrance microservice

    Parameter nameParameter functionSuggested parameter value
    wds.linkis.rpc.receiver.asyn.queue.size.maxSpecify the queue size of RPC messages received by the entrance microservice5000
    wds.linkis.rpc.receiver.asyn.consumer.thread.maxSpecify Entrance microservice RPC consumption thread pool size200

    b) EngineManager microservice

    Parameter nameParameter functionSuggested parameter value
    wds.linkis.enginemanager.memory.maxUsed to specify the total memory of all engines started by the EM process40G (64) or 100G (128)
    wds.linkis.enginemanager.cores.maxUsed to specify the total number of cores of all engines started by the EM process20
    wds.linkis.enginemanager.engine.instances.maxUsed to specify the total number of all engines started by the EM process20
    wds.linkis.enginemanager.protected.memoryUsed to specify the memory used by the EM process for protection2G (meaning that up to 38 (40-2) G of memory can be used)
    wds.linkis.enginemanager.protected.cores.maxUsed to specify the number of cores used for protection by the EM process2 (meaning that up to 18 (20-2) cores can be used)
    wds.linkis.enginemanager.protected.engine.instancesUsed to specify the number of engines used for protection by the EM process1 (meaning that up to 19 (20-1) engines can be started)

    3 Other general parameter configuration#

    In addition to the Entrance and EngineManager microservices, Linkis's other microservices also have their own configurable parameters.

    3.1 PublicService custom configuration#

    The PublicService microservice carries Linkis's various auxiliary functions, including file editing and saving, and result-set reading.

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.workspace.filesystem.get.timeout | Timeout for obtaining the file system | 10000 (unit: ms)
    wds.linkis.workspace.resultset.download.maxsize | Maximum number of rows when downloading a result set | 5000 (download at most 5000 rows) or -1 (download all)

    3.2 Engine Microservice#

    Engine microservices, which may be started at any time, include the Spark, Hive and Python engines. Their configuration parameters must be modified in linkis-engine.properties under conf in the EngineManager installation directory.

    Parameter name | Parameter function | Suggested parameter value
    wds.linkis.engine.max.free.time | How long an idle engine waits before being killed | 3h (an engine is automatically killed after three hours without executing a task)

    4 Summary#

    Linkis's deployment plan is closely tied to how it is used, with the number of concurrent users being the biggest factor. To let users work comfortably while keeping cluster server costs down, operations and development staff need to experiment and listen to user feedback; if the deployed plan proves inappropriate, it should be adjusted in a timely and suitable manner.

    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/quick_deploy/index.html b/docs/0.11.0/deployment/quick_deploy/index.html index a5a3f5501e1..1194c55d37b 100644 --- a/docs/0.11.0/deployment/quick_deploy/index.html +++ b/docs/0.11.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -32,7 +32,7 @@ // 3. Start code execution val jobExecuteResult = client.execute(JobExecuteAction.builder() .setCreator("LinkisClient-Test") //creator, requesting the system name of the Linkis client, used for system-level isolation .addExecuteCode("show tables") //ExecutionCode The code to be executed .setEngineType(EngineType.SPARK) // The execution engine type of Linkis that you want to request, such as Spark hive, etc. .setUser("${username}").build()) //User, request user; used for user-level multi-tenant isolation println("execId: "+ jobExecuteResult.getExecID + ", taskId:" + jobExecuteResult.taskID) // 4. Get the execution status of the script var status = client.status(jobExecuteResult) while(!status.isCompleted) { // 5. Get the execution progress of the script val progress = client.progress(jobExecuteResult) val progressInfo = if(progress.getProgressInfo != null) progress.getProgressInfo.toList else List.empty println("progress: "+ progress.getProgress + ", progressInfo:" + progressInfo) Utils.sleepQuietly(500) status = client.status(jobExecuteResult) } // 6. Get the job information of the script val jobInfo = client.getJobInfo(jobExecuteResult) // 7. Get the list of result sets (if the user submits multiple SQL at a time, multiple result sets will be generated) val resultSet = jobInfo.getResultSetList(client).head // 8. Get a specific result set through a result set information val fileContents = client.resultSet(ResultSetAction.builder().setPath(resultSet).setUser(jobExecuteResult.getUser).build()).getFileContent println("fileContents: "+ fileContents) IOUtils.closeQuietly(client)}
    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/quick_start/index.html b/docs/0.11.0/deployment/quick_start/index.html index 44a465f0a19..efff6187405 100644 --- a/docs/0.11.0/deployment/quick_start/index.html +++ b/docs/0.11.0/deployment/quick_start/index.html @@ -7,7 +7,7 @@ Quick Start | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Quick Start

    After installation, the start script needs to be executed.

    1 Start Service#

    Execute the following command in the installation directory to start all services:

      ./bin/start-all.sh > start.log 2>start_error.log

    2 View successful startup#

    You can check whether the services started successfully on the Eureka page, as follows:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} in a browser and check whether the services registered successfully.

    If you did not specify EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is: http://127.0.0.1:20303

    As shown in the figure below, if the following microservices appear on your Eureka homepage, it means that the services have been started successfully and you can provide services to the outside world normally:

    Eureka

    3 Quick Use Linkis#

    Please refer to the usage documentation to quickly use Linkis.

    - + \ No newline at end of file diff --git a/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html b/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html index 67496e65036..7f0899775a7 100644 --- a/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Source Code Directory Structure

    An explanation of Linkis's hierarchical directory structure. To understand Linkis in depth, please check the related Linkis architecture design documents.

    ├─assembly
    ├─bin
    ├─conf
    ├─core //Core abstraction, which contains all common modules
    │ ├─cloudModule //Modules that must be introduced by microservices, embedded Jetty + WebSocket + SpringBoot + Jersey
    │ ├─cloudMybatis //Mybatis module of SpringCloud
    │ ├─cloudProtocol //General protocol, such as RPC communication between Entrance and Engine
    │ ├─cloudRPC //RPC module, complex two-way communication based on Feign implementation
    │ ├─common //Common module, built-in many common tools
    │ ├─httpclient //Java SDK top-level interface
    │ └─scheduler //General scheduling module
    ├─db //Database information
    ├─docs //All documents
    ├─eurekaServer //Eureka module
    ├─extensions //plugin
    │ └─spark-excel //spark supports excel to DF/DF to excel plug-in
    ├─gateway //Gateway module
    │ ├─core //Gateway core implementation, including authentication/analysis/routing of front-end interfaces
    │ ├─gateway-httpclient-support //gateway support for Java SDK
    │ ├─gateway-ujes-support //Analysis and routing support for UJES interface
    │ └─springcloudgateway //Introduce spring cloud gateway, front-end requests are intercepted from here
    ├─publicService //public service
    │ ├─application //application module
    │ ├─bin
    │ ├─conf
    │ ├─configuration //Parameter module, get the engine parameters from here
    │ ├─database //Provide Hive metadata query service
    │ ├─query //Provide Job Manager and Job History
    │ ├─udf //UDF module
    │ ├─variable //User-defined variable module
    │ └─workspace //Workspace module, manage user scripts
    ├─resourceManager //Resource management service
    │ ├─resourcemanagerclient //resource management client
    │ ├─resourcemanagercommon //Common module
    │ └─resourcemanagerserver //Resource management server
    ├─storage //Unified storage service
    │ ├─pesIO //Remote storage service
    │ │ ├─io-engine //The engine side of remote storage, which actually accesses the bottom storage side
    │ │ ├─io-enginemanager //engineManager for remote storage
    │ │ └─io-entrance //Request entry for remote storage
    │ └─storage //Unified external interface for unified storage
    └─ujes //Unified operation execution service
      ├─client //Java SDK, users can directly access Linkis through Client
      ├─definedEngines //Implemented engines
      │ ├─hive //Hive engine
      │ │ ├─engine //The engine execution end actually docking with the underlying Hive
      │ │ ├─enginemanager
      │ │ └─entrance
      │ ├─pipeline //Import and export engine for transfer between storage systems
      │ │ ├─engine
      │ │ ├─enginemanager
      │ │ └─entrance
      │ ├─python //stand-alone Python engine
      │ │ ├─engine //The engine execution end that actually docks with the underlying Python
      │ │ ├─enginemanager
      │ │ └─entrance
      │ ├─spark //spark engine
      │ │ ├─engine //The engine execution end actually connecting to the underlying Spark
      │ │ ├─enginemanager
      │ │ └─entrance
      │ └─tispark //TiSpark engine, actually docking with TiSpark
      ├─engine //General low-level engine module
      ├─enginemanager //General low-level enginemanager module
      ├─entrance //General low-level entrance module
      └─entranceclient //Simplified version of entrance
    - + \ No newline at end of file diff --git a/docs/0.11.0/development/compile_and_package/index.html b/docs/0.11.0/development/compile_and_package/index.html index 623740bdf8e..9aa17bed9bd 100644 --- a/docs/0.11.0/development/compile_and_package/index.html +++ b/docs/0.11.0/development/compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Compile And Package

    1 Fully compile Linkis#

    Compilation environment requirements: JDK8 or above is required for compilation, and both Oracle/Sun and OpenJDK are supported.

    After obtaining the project code from git, use maven to package the project installation package.

    Please note: The official recommendation is to use Hadoop-2.7.2, Hive-1.2.1, Spark-2.4.3 and Scala-2.11.12 to compile Linkis.

    If you want to use other versions of Hadoop, Hive, and Spark to compile Linkis, you can enter the root directory of the Linkis source code package and manually modify the relevant version information of the pom.xml file, as follows:

        cd incubator-linkis-x.x.x
        vim pom.xml

        <properties>
            <hadoop.version>2.7.2</hadoop.version>
            <hive.version>1.2.1</hive.version>
            <spark.version>2.4.3</spark.version>
            <scala.version>2.11.12</scala.version>
            <jdk.compile.version>1.8</jdk.compile.version>
        </properties>

    (1) If you are using it locally for the first time, you must first execute the following command in the root directory of the Linkis source code package:

        cd incubator-linkis-x.x.x
        mvn -N install

    (2) Execute the following command in the root directory of the Linkis source code package:

        cd incubator-linkis-x.x.x
        mvn clean install

    (3) Obtain the installation package, under the assembly->target directory of the project:

        wedatasphere-linkis-x.x.x/assembly/target/wedatasphere-linkis-x.x.x-dist.tar.gz

    2 Compile a single service#

    After obtaining the project code from git, use maven to package the project installation package.

    (1) If you are using it locally for the first time, you must first execute the following command in the root directory of the Linkis source code package:

        cd incubator-linkis-x.x.x
        mvn -N install

    (2) Jump to the corresponding module through the command line in the terminal, such as

        cd publicService

    (3) Execute the compile command in the pom.xml directory corresponding to the module:

        mvn clean install

    (4) Obtain the installation package; the compiled package will be in the target directory of the corresponding module:

       target/linkis-publicservice.zip
    - + \ No newline at end of file diff --git a/docs/0.11.0/development/install-server/index.html b/docs/0.11.0/development/install-server/index.html index 1eeecb335d6..55bc7d47fb7 100644 --- a/docs/0.11.0/development/install-server/index.html +++ b/docs/0.11.0/development/install-server/index.html @@ -7,7 +7,7 @@ Installation Of A Single Service | Apache Linkis - + @@ -17,7 +17,7 @@
    • Additional parameters added by SparkEngineManager

        ## Configure engine jar
        wds.linkis.enginemanager.core.jar=$SERVER_HOME/$SERVERNAME/lib/linkis-ujes-spark-engine-version.jar
        ## Configure main jar
        wds.linkis.spark.driver.conf.mainjar=$SERVER_HOME/$SERVERNAME/conf:$SERVER_HOME/$SERVERNAME/lib/*
    - + \ No newline at end of file diff --git a/docs/0.11.0/development/new_engine_conn/index.html b/docs/0.11.0/development/new_engine_conn/index.html index 253c5681a38..8742ec338cd 100644 --- a/docs/0.11.0/development/new_engine_conn/index.html +++ b/docs/0.11.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    How To Quickly Implement A New Engine

    1 General introduction#

            When back-end developers use Linkis, they can not only directly use the execution engines Linkis has already developed, but also use the framework to develop their own applications according to their needs.

            Linkis can be abstracted into the Entrance, EngineManager and Engine modules. The roles and architecture of these three modules can be found in the UJES architecture design document.

            Users only need to implement the necessary interfaces of the three modules to implement their own Linkis engine.

    2 Access operation#

    2.1 Entrance access#

    2.1.1 maven dependency#

    <dependency>
      <groupId>com.webank.wedatasphere.linkis</groupId>
      <artifactId>linkis-ujes-entrance</artifactId>
      <version>0.5.0</version>
    </dependency>

    2.1.2 Interfaces to be implemented#

    Entrance has no interfaces that must be implemented. The following interfaces can be implemented as needed:

    • EntranceParser. Used to convert a request from the front end, usually a JSON body, into a task that can be persisted. AbstractEntranceParser is already provided, so users only need to implement the parseToTask method; the system provides the CommonEntranceParser implementation by default (see the sketch after this list).

      CommonEntranceParser

    • EngineRequester. Used to obtain a RequestEngine class, which is used to request a new Engine from the EngineManager microservice. Linkis already has an implementation class.

      EngineRequesterImpl

    • Scheduler. Used to implement scheduling. By default, multi-user concurrency with FIFO execution within a single user is provided.

      FIFOScheduler
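    For instance, a custom EntranceParser might look roughly like this (a sketch only: the parameter and task types below are assumptions about the 0.x API, not its exact signatures):

        class MyEntranceParser extends AbstractEntranceParser {
          // turn the front end's JSON (assumed here to arrive as a java.util.Map)
          // into a persistable task
          override protected def parseToTask(json: java.util.Map[String, Object]): Task = {
            val task = new RequestPersistTask        // hypothetical persistable Task subclass
            task.setExecutionCode(String.valueOf(json.get("executionCode")))
            task.setUmUser(String.valueOf(json.get("umUser")))
            task
          }
        }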

    2.2 EngineManager access#

    2.2.1 maven dependency#

    <dependency>
      <groupId>com.webank.wedatasphere.linkis</groupId>
      <artifactId>linkis-ujes-enginemanager</artifactId>
      <version>0.5.0</version>
    </dependency>

    2.2.2 Interfaces to be implemented#

    EngineManager needs to implement the following interfaces as needed:

    • EngineCreator. AbstractEngineCreator already exists; the createProcessEngineBuilder method needs to be implemented to create an EngineBuilder.

              For ProcessEngineBuilder, a JavaProcessEngineBuilder class is provided by default. It is an abstract class that already takes care of the necessary classpath, JavaOpts, GC file path and log file path, and of opening a DEBUG port in test mode.

              To extend JavaProcessEngineBuilder, you only need to add your extra classpath entries and JavaOpts.

      AbstractEngineCreator

    • EngineResourceFactory. AbstractEngineResourceFactory already exists; the getRequestResource method needs to be implemented to obtain the user's personalized resource request.

      EngineResourceFactory

    • hooks. This is a Spring entity bean, mainly used to add pre- and post-hooks before and after the engine is created and started. The user needs to provide an Array[EngineHook] for dependency injection.

      hooks

              For specific examples, please refer to the implementation of Hive EngineManager.

    • resources. This is a Spring entity bean, mainly used for registering resources with RM. The resources are an instance of ModuleInfo, which the user needs to provide for dependency injection.

      resources

    2.3 Engine access#

    2.3.1 maven dependency#

    <dependency>
      <groupId>com.webank.wedatasphere.linkis</groupId>
      <artifactId>linkis-ujes-engine</artifactId>
      <version>0.5.0</version>
    </dependency>

    2.3.2 Interfaces to be implemented#

    1. The interfaces that Engine must implement are as follows:
    • EngineExecutorFactory. To create an EngineExecutor, the createExecutor method needs to be implemented. Specifically, an EngineExecutor is created through a Map that stores parameters.

    EngineExecutorFactory

    • EngineExecutor. The actual executor, used to run the code submitted through Entrance. The following need to be implemented: getActualUsedResources (the resources actually used by the engine), executeLine (execute one line of code parsed out by the CodeParser), and executeCompletely (a supplement to executeLine: if executeLine returns ExecuteIncomplete, the new code and the code that previously returned ExecuteIncomplete are passed to the engine together for execution). A sketch follows this list.

    EngineExecutor
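    A rough sketch of the executeLine / executeCompletely contract (the method signatures and helper methods are assumptions for illustration, not the exact 0.x API):

        class MyEngineExecutor extends EngineExecutor {
          override def executeLine(context: EngineExecutorContext, line: String): ExecuteResponse =
            try {
              if (!isComplete(line)) ExecuteIncomplete(line)   // isComplete: hypothetical helper
              else { runOnUnderlyingEngine(line); SuccessExecuteResponse() } // also hypothetical
            } catch { case t: Throwable => ErrorExecuteResponse("execution failed", t) }

          // called when the previous executeLine returned ExecuteIncomplete:
          // the incomplete code and the new code are executed together
          override def executeCompletely(context: EngineExecutorContext,
                                         newCode: String, incompleteCode: String): ExecuteResponse =
            executeLine(context, incompleteCode + "\n" + newCode)
        }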

    2. The interfaces or beans that the Engine does not have to implement are as follows:
    • engineHooks: Array[EngineHook], a Spring bean. An EngineHook is a pre- or post-hook around engine creation. The system currently provides two hooks: CodeGeneratorEngineHook for loading UDFs and functions, and ReleaseEngineHook for releasing idle engines. If not specified, the system provides engineHooks = Array(ReleaseEngineHook) by default.

    engineHooks

    • CodeParser. Used to parse the code so that it can be executed line by line. If not specified, the system by default provides a CodeParser that returns all the code at once.

    CodeParser

    • EngineParser. Used to convert a RequestTask into a Job that can be submitted to the Scheduler. If not specified, the system by default provides an EngineParser that converts the RequestTask into a CommonEngineJob.

    EngineParser

    3 Reference examples#

            This section provides a reference example by walking through how the Hive engine was written.

    3.1 HiveEntrance access#

            As described in Section 2, Entrance has no interfaces that must be implemented. In the linkis-0.5.0 code, the Hive entrance has only two classes, which are used solely for error-code extension.

    3.2. HiveEngineManager access#

    -1. Implementation of EngineCreator interface

    HiveEngineCreator

            From the above figure, we can see that there is a HiveEngineCreator class in the HiveEM module, which inherits the AbstractEngineCreator class, and also implements the createProcessEngineBuilder method, returning a HiveQLProcessBuilder.

    -2.HiveQLProcessBuilder implementation

            The HiveEngineManager module has a HiveQLProcessBuilder class, which inherits from JavaProcessEngineBuilder. This class implements a number of necessary interfaces and also overrides the build method. In fact, the parent class's build method is already complete enough; HiveQLProcessBuilder overrides it to obtain the parameters passed in by the user and add them to the startup command.

    HiveQLProcessBuilder

    -3. AbstractEngineResourceFactory interface implementation

    HiveEngineResourceFactory

            In this instance, each time a user requests an engine, we can notify the ResourceManager of the number of CPU cores, the memory size, and the number of instances that the user wants to obtain.

    -4. Injection of resources and hooks bean

    HiveBeans

            From the figure above, we can see that a Spring Configuration is used to inject the two beans, resources and hooks. The UJES framework itself uses the @ConditionalOnMissingBean annotation to inject default beans, so developers can inject their own entity beans as needed. Through this bean, the user can register with RM the EngineManager service's total memory, total number of CPU cores, and the total number of engine instances that may be created.

    3.3 HiveEngine access#

    -1. EngineExecutorFactory interface implementation

            There is a HiveEngineExecutorFactory in the HiveEngine module. At the end of the createExecutor method, the HiveEngineExecutor is returned.

    HiveEngineExecutorFactory

    -2.EngineExecutor interface implementation

            Here executeLine is an interface that must be implemented: it takes one line of script, as split by the CodeParser, and returns an ExecuteResponse (success or failure).

    HiveEngineExecutor

            In addition, executeCompletely also needs to be implemented; it is called when executeLine returns ExecuteIncomplete.

            Engine also has common operational methods, such as close, kill, pause and progress, which can be implemented according to your needs.

    4 FAQ#

            You are welcome to join the group to ask questions.

    WeChat group

    - + \ No newline at end of file diff --git a/docs/0.11.0/development/start-server/index.html b/docs/0.11.0/development/start-server/index.html index c6a1ad7aaf6..519a8c1fdca 100644 --- a/docs/0.11.0/development/start-server/index.html +++ b/docs/0.11.0/development/start-server/index.html @@ -7,7 +7,7 @@ Start Of A Single Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Start Of A Single Service

    1 Jump to the corresponding service directory#

    e.g. PublicService

        cd linkis-publicservice

    2 Execute Launch#

        sh start-publicservice.sh

    3 Startup Success Check#

    • (1) Check the startup log by viewing linkis.out
        less -i logs/linkis.out
    • By viewing the Eureka interface

    To check service startup on the Eureka page:

    Use http://${EUREKA_INSTALL_IP}:${EUREKA_PORT}, open in browser, see whether the service was registered successfully.

    If the microservice appears on your Eureka home page, it started successfully and can serve external requests:

    Eureka

    - + \ No newline at end of file diff --git a/docs/0.11.0/engine_usage/hive/index.html b/docs/0.11.0/engine_usage/hive/index.html index be9acf1e042..f4598c5f062 100644 --- a/docs/0.11.0/engine_usage/hive/index.html +++ b/docs/0.11.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine | Apache Linkis - + @@ -20,7 +20,7 @@ Figure 2 Hive running effect Figure 2

    2 Hive engine implementation#

            The Hive execution engine is implemented by implementing the necessary interfaces of the Entrance, EngineManager and Engine modules, following the Linkis development documentation. The Engine module is the most distinctive part: Hive's implementation has its own set of logic.

            The release version currently provided by Linkis is based on Hadoop 2.7.2 and Hive 1.2.1, both Apache versions.

            Linkis's Hive engine interacts with the underlying hive mainly through the HiveEngineExecutor class, which is instantiated by the HiveEngineExecutorFactory bean.

            In the executeLine interface implemented by HiveEngineExecutor, Linkis uses the CommandProcessorFactory class provided by Hive, passing in the local Hive configuration, to obtain an org.apache.hadoop.hive.ql.Driver instance; the Driver class provides an API for submitting the user's script code to the cluster for execution.
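            As a simplified Scala illustration of this flow (not the actual HiveEngineExecutor code; my_table is a placeholder):

        import org.apache.hadoop.hive.conf.HiveConf
        import org.apache.hadoop.hive.ql.Driver
        import org.apache.hadoop.hive.ql.processors.CommandProcessorFactory

        val hiveConf = new HiveConf()
        // For plain SQL, CommandProcessorFactory hands back a Driver
        val driver = CommandProcessorFactory.get(Array("select"), hiveConf).asInstanceOf[Driver]
        val response = driver.run("SELECT count(*) FROM my_table")
        if (response.getResponseCode == 0) {            // 0 means success
          val rows = new java.util.ArrayList[String]()
          while (driver.getResults(rows)) {             // fetch result rows batch by batch
            rows.toArray.foreach(println)
            rows.clear()
          }
        }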

            After the Driver submits the Hive SQL code, an API reports whether execution succeeded, and on success the result set can be obtained. With the help of the unified storage service provided by Linkis, the result set is then stored in the specified directory for users to view.

            In addition, after the Driver submits the Hive SQL, if a MapReduce task is generated, we can also kill the submitted Hive query via the killRunningJobs API provided by HadoopJobExecHelper; this is how a user's kill request from the front end is implemented.

            Linkis's Hive engine also implements a progress function. Specifically, the runningJobs field of HadoopJobExecHelper is used to obtain the running MR jobs, each of which reports its map and reduce progress; a simple calculation over these gives the total progress of the task. Note that runningJobs removes an MR job from the list as soon as it finishes executing, so the SQL execution plan must be obtained at the beginning. For details, please refer to the code.

    3 Adapt your own hive version#

            Because the current version of Linkis supports the Apache 1.2.1 version of Hive, many users' clusters may not match ours, so you may need to recompile the Hive execution engine yourself.

            For example, a user on a CDH 1.1.0 build needs to change hive.version in the top-level pom.xml to the desired version and then compile, for instance:
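        <!-- replace with your cluster's exact Hive version string; cdh5.x.x is a placeholder -->
        <hive.version>1.1.0-cdh5.x.x</hive.version>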

    When we were adapting, we also ran into jar-package conflicts; resolving them requires checking the logs. If the cause is still unclear, you are welcome to join the group for consultation.

    WeChat group

    4 Future goals#

    1. Seamlessly adapt to more hive versions.
    2. The deployment method is simpler, try to use the containerized method.
    3. The function is more complete, and it is more accurate and complete in terms of execution progress, data accuracy, etc.
    - + \ No newline at end of file diff --git a/docs/0.11.0/engine_usage/python/index.html b/docs/0.11.0/engine_usage/python/index.html index fdbad5e09ce..7931a4c54cd 100644 --- a/docs/0.11.0/engine_usage/python/index.html +++ b/docs/0.11.0/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine | Apache Linkis - + @@ -20,7 +20,7 @@ Figure 3 Spark running effect Figure 2

    2 Implementation of Python engine#

            The Linkis Python execution engine is implemented by following How to implement a new engine and implementing the necessary interfaces of the Entrance, EngineManager and Engine modules.

            The execution module uses the py4j framework to let the Python executor interact with the JVM. After the user submits code, the JVM passes it through py4j to the Python interpreter for execution and gets the output or error message back from the Python process.

            Specifically, you can look at python.py in the Python engine's source code, where several Python methods defined by Linkis for process interaction can be found. The JVM side of the pattern is sketched below.
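            A minimal Scala sketch of the JVM side of this py4j pattern (illustrative only; the real engine's entry-point class and methods differ):

        import py4j.GatewayServer

        // The JVM exposes an entry point; the launched python process connects
        // back to this gateway, enabling two-way calls between JVM and Python.
        class EngineEntryPoint {
          def onOutput(line: String): Unit = println(s"python says: $line")
        }

        object PythonEngineGateway extends App {
          val server = new GatewayServer(new EngineEntryPoint)
          server.start()  // python.py then connects via py4j's JavaGateway
        }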

    3 Future goals#

    1. Make the deployment method simpler, ideally containerized.
    2. Support the submission of Spark jar packages.
    3. Better support the submission of Spark's yarn-cluster mode.
    - + \ No newline at end of file diff --git a/docs/0.11.0/engine_usage/spark/index.html b/docs/0.11.0/engine_usage/spark/index.html index 87754928c3a..5fe0487ef6c 100644 --- a/docs/0.11.0/engine_usage/spark/index.html +++ b/docs/0.11.0/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine | Apache Linkis - + @@ -23,7 +23,7 @@ -3. Better support for spark's yarn-cluster submission.

    - + \ No newline at end of file diff --git a/docs/0.11.0/introduction/index.html b/docs/0.11.0/introduction/index.html index 9ac6e2cfb59..18e04c9269d 100644 --- a/docs/0.11.0/introduction/index.html +++ b/docs/0.11.0/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 0.11.0

    Introduction

    Linkis is an open-source project from WeBank that addresses the connectivity, access and reuse problems between upper-layer tools and applications and the various underlying computation and storage engines.

    Introduction#

    Linkis Github repo: https://github.com/apache/incubator-linkis

    As a data middleware, Linkis provides a unified REST/WebSocket/JDBC interface for submitting SQL, Pyspark, HiveQL and Scala scripts to underlying computation and storage engines such as Spark, TiSpark, Hive, Python and HBase.

    Based on a microservice architecture, Linkis provides enterprise-level features such as financial-grade multi-tenant isolation, resource control and permission isolation; it supports unified variables, UDFs, functions and user resource file management, and provides lifecycle management for high-concurrency, high-performance and highly available big data jobs and requests.

    Background#

    The widespread use of big data technologies has led to a proliferation of upper-layer applications and underlying computing engines.

    Meeting business needs by introducing multiple open-source components, and continuously updating and enriching the big data platform architecture, is common practice for almost all enterprises at this stage.

    As shown in the diagram below, as the direct connections between our upper-layer applications and tool systems and the underlying computing and storage components multiply, the entire data platform turns into a mesh structure.


    Raw Data Ecological Map


    As new components are continuously introduced to meet business needs, more and more pain points have arisen:

    1. Business needs vary widely, and each upper-layer component works in its own way, so users experience a strong sense of fragmentation and face high learning costs.

    2. Data is diverse, and storage and computation are complex; a single component usually solves only one problem, so developers must master a broad technology stack.

    3. Newly introduced components are often incompatible with the existing data platform in areas such as multi-tenant isolation, user resource management and user permission management; customized top-down development is not only heavy work, it also reinvents the wheel.

    4. Upper-layer applications interface directly with the underlying computing and storage engines, so any change in the backend immediately affects the normal use of business products.

    Original design intention#

    How to provide a unified data middleware that shields all the underlying call and usage details, lets business users focus solely on implementing their business, and keeps them unaffected even by expansion or wholesale relocation of the underlying platform: that is the original design intention of Linkis!

    Linkis Solution

    Technical architecture#

    Technical architecture

    As shown in the diagram above, we have built several microservice clusters on top of Spring Cloud microservice technology to form Linkis's middleware capabilities.

    Each microservice cluster assumes part of the system's functional responsibilities, which we have delineated as follows:

    • Unified job execution service: a distributed REST/WebSocket service that receives the various access requests submitted by upper-layer systems.

      Currently supported computing engines: Spark, Python, TiSpark, Hive and Shell.

      Supported script languages include: SparkSQL, Spark Scala, Pyspark, R, Python, HQL and Shell;

    • Resource management service: controls in real time each system's and each user's resource usage, limits the resource usage and concurrency of systems and users, and provides real-time resource dynamics charts to make it easy to view and manage system and user resources;

      Currently supported resource types: Yarn queue resources, servers (CPU and memory), number of concurrent users, etc.

    • Unified storage service: generic IO, able to interface quickly with various storage systems, providing a uniform call entry, supporting all commonly used formats, highly integrated and simple to use;
    • Unified context service: unifies user and system resource files (user scripts, JARs, ZIPs, Properties files, etc.); parameters and variables for users, systems and computing engines are managed in one place, set once and referenced automatically;
    • Repository service: system- and user-level material management, with sharing and circulation, supporting automatic management across the whole lifecycle;
    • Metadata service: real-time display of Hive database and table structure and partitions.

    Building on the interaction of these microservice clusters, we have improved the way the entire big data platform serves external users, and its processes.

    Business architecture#

    Operational framework

    Name Explanation:

    1) Gateway:

    Enhanced with plugins on top of Spring Cloud Gateway, adding 1-to-N support between frontend clients and backend WebSocket microservices (see the detailed architecture implementation); it mainly parses user requests and routes them to the specified microservice.

    2) Unified entry:

    The unified entrance is the job lifecycle manager for a class of engines.

    Entrance manages a job's entire lifecycle: receiving the job, submitting it to the execution engine, feeding execution information back to the user, and completing the job.

    3) Engine manager:

    The engine manager manages the engine throughout its lifecycle.

    It is responsible for requesting and locking resources from the resource management service, for instantiating new engines, and for monitoring the engines' liveness.

    4) Execution Engine:

    The execution engine is the microservice that actually executes user jobs. It is started by the engine manager.

    To enhance interaction, the execution engine talks directly to the unified entrance, delivering the logs, progress, status and results of the execution to it in real time.

    5) Resource management services

    Controls in real time the resource usage of each system and each user, as well as the engine managers' resource usage and actual load, and limits the resource usage and concurrency of systems and users.

    6) Eureka

    Eureka is a service discovery framework developed by Netflix; Spring Cloud integrates it in its sub-project spring-cloud-netflix to provide Spring Cloud's service discovery capability.

    An Eureka client is built into every microservice, giving it access to the Eureka server and the ability to discover services in real time.

    Processes#

    How does Linkis handle a SparkSQL submission from the parent system?

    Process time series

    1. A user of an upper-layer system submits a SQL query, which first passes through the Gateway; the Gateway parses the user request and routes it to the appropriate unified entrance (Entrance).

    2. Entrance first checks whether that system's user already has an available Spark engine service; if so, it submits the request directly to that Spark engine service.

    3. If no Spark engine service is available, Entrance uses Eureka's service discovery to obtain the list of all engine managers, then queries RM in real time for their actual load.

    4. Entrance picks the engine manager with the lowest load and asks it to start a Spark engine service.

    5. On receiving the request, the engine manager asks RM whether a new engine can be started for this user.

    6. If it can, resources are requested and locked; otherwise a failure exception is returned to Entrance.

    7. Once the resources are locked, the new Spark engine service is started; after a successful startup, the new Spark engine is returned to Entrance.

    8. Once Entrance has the new engine, it requests SQL execution from it.

    9. The new Spark engine receives the SQL request, submits the SQL to Yarn for execution, and pushes logs, progress and status to Entrance in real time.

    10. Entrance delivers the logs, progress and status to the Gateway in real time.

    11. The Gateway pushes the logs, progress and status back to the frontend.

    12. Once the SQL has executed successfully, the engine proactively pushes the result set to Entrance, and Entrance notifies the frontend to fetch the results.

    How to ensure high real-time#

    It is well known that Spring Cloud integrates Feign as the communication tool between microservices.

    Feign-based HTTP calls between microservices only support, under simple rules, randomly accessing one instance of the target microservice.

    So what has Linkis done so that its execution engines can push logs, progress and status directly back to the exact entrance that requested them?

    Linkis has implemented its own underlying RPC communication scheme on top of Feign.

    Linkis RPC Architecture

    As shown in the diagram above, we encapsulated a Sender and a Receiver on top of Feign.

    The Sender, the sending end, is directly available to users, who can target a specific microservice instance or access one at random; broadcasting is also supported.

    The Receiver, the receiving end, requires users to implement the Receiver interface to handle the real business logic.

    Sender offers three access methods, as follows:

    1. The ask method is a synchronous request-response method, requiring the receiving end to respond synchronously;

    2. The send method is a synchronous request method, used only to send a request synchronously to the receiving end, without asking the receiving end for an answer;

    3. The deliver method is an asynchronous request method: as long as the sending process does not exit, the request will be sent to the receiving end later on another thread.
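    A hedged sketch of how these three methods might be used (names follow the text above; exact Linkis signatures may differ, and RequestSomething is a placeholder message type):

        val sender = Sender.getSender("linkis-entrance")  // obtain a Sender for a target microservice

        val answer = sender.ask(RequestSomething())  // 1. synchronous request-response
        sender.send(RequestSomething())              // 2. synchronous, no answer expected
        sender.deliver(RequestSomething())           // 3. asynchronous, sent later on another thread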

    How to support high concurrency#

    Linkis designed five asynchronous message queues and thread pools; each job occupies a thread for less than 1 millisecond, ensuring that a single entrance can accept more than 10,000 TPS of resident job requests.

    Full-asynchronous call thread pool

    • How is the throughput of upper-layer requests improved?

      The Entrance WebSocket processor has an internal processing thread pool and a handler queue to receive upper-layer requests routed from Spring Cloud Gateway.

    • How are different users of different systems isolated from one another?

      In the Entrance job scheduler, each user of each system has a dedicated thread, ensuring isolation.

    • How is job execution ensured?

      The job execution pool is used only for submitting jobs; once a job has been submitted to the engine, it is immediately placed in the job execution queue, so each job occupies an execution-pool thread for no more than 1 millisecond.

      The RPC request pool receives and processes the logs, progress, status and result sets coming back from the engines, and updates each job's information in real time.

    • How are a job's logs, progress and status pushed to the upper-layer systems in real time?

      The WebSocket send pool is dedicated to processing jobs' logs, progress and status, and pushes this information to the upper-layer systems.

    User-Level Isolation and Scheduling Timeliness#

Linkis has designed the Scheduler module, a grouped scheduling and consumption module that can be intelligently monitored and scaled, to achieve Linkis's high-concurrency capability.

Grouped scheduling and consumption architecture

Each user of each system is grouped separately to ensure isolation at both the system level and the user level.

Each consumer group has an independent monitoring thread that measures the length of the consumer's waiting queue, the number of events being executed, and the growth rate of execution time.

Thresholds and alarm ratios are set on these indicators for every consumer group. As soon as an indicator exceeds its threshold, or the ratio between indicators exceeds the limit (for example, the average execution time is observed to be greater than the distribution interval parameter), the monitoring thread immediately scales the consumer group.

When scaling, the process above is fully reused: one specific parameter is targeted for adjustment and the other parameters are scaled automatically (see the sketch below).
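A minimal Scala sketch of such a monitoring thread's check, with thresholds and names invented for illustration:

import java.util.concurrent.LinkedBlockingQueue
import java.util.concurrent.atomic.AtomicInteger

class GroupMonitor(queue: LinkedBlockingQueue[Runnable],
                   maxQueueSize: Int = 1000,
                   warnRatio: Double = 0.8) {
  private val consumerCount = new AtomicInteger(1)

  // Called periodically by the group's control thread
  def check(): Unit = {
    val usage = queue.size().toDouble / maxQueueSize
    if (usage > warnRatio) {
      val n = consumerCount.incrementAndGet()
      println(s"waiting queue ${(usage * 100).toInt}% full, scaling consumers to $n")
      // a real implementation would start a new consumer thread here
    }
  }
}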

    Summary#

Linkis, as a computation middleware, has made many attempts and efforts to shield upper-layer applications from the details of lower-level calls.

For example: how does Linkis implement the unified storage service? How does Linkis unify UDFs, functions and user variables?

Due to space limitations, this article does not discuss these in detail; you are welcome to visit our official website: https://linkis.apache.org

Is there a truly open-source computation middleware, developed in-house and proven in financial-grade production environments and scenarios, that has been given back to the open source community, so that people can confidently adopt it for production with financial-grade operation support and enterprise-grade features?

    We want Linkis to be the answer.

At the same time, we look forward to more community members joining us to promote Linkis's growth.

Upgrade From 0.9.0 To 0.9.1 Guide

The eureka module does not need to be updated.

2.2 Modules that only need their packages updated#

          You only need to upgrade the Linkis-related modules to 0.9.1:

    1. linkis-gateway
    2. linkis-resourceManager
    3. linkis-ujes-hive-enginemanager
    4. linkis-ujes-hive-entrance
    5. linkis-ujes-jdbc-entrance
    6. linkis-ujes-python-entrance
    7. linkis-ujes-spark-entrance

    Upgrade steps:

    1. Delete the 0.9.0 package

    2. Unzip the corresponding service directory and copy the package to the corresponding lib directory

    Linkis-gateway needs to modify the configuration of linkis.properties:

# Add the parameter
wds.linkis.gateway.conf.enable.token.auth=true
# Modify the following parameter
wds.linkis.gateway.conf.url.pass.auth=/dws/

    Linkis-gateway needs to copy the proxy configuration token.properties to the conf directory:

    2.3 Add material library related packages#

The following modules need to add the material library (BML) related packages:

1. linkis-publicservice adds BML support and requires the BML client packages:

linkis-bmlclient-0.9.1.jar
linkis-bmlcommon-0.9.1.jar
linkis-gateway-httpclient-support-0.9.1.jar
linkis-httpclient-0.9.1.jar

    In addition, the netty package has been added:

    netty-3.6.2.Final.jar

    In addition, you need to configure the gateway address in linkis.properties:

wds.linkis.gateway.ip=127.0.0.1
wds.linkis.gateway.port=9001
2. linkis-ujes-python-enginemanager and linkis-ujes-spark-enginemanager add BML support and require the BML client packages:
linkis-bmlclient-0.9.1.jar
linkis-bmlcommon-0.9.1.jar
linkis-bml-hook-0.9.1.jar
linkis-gateway-httpclient-support-0.9.1.jar
linkis-httpclient-0.9.1.jar

    Upgrade steps:

    1. Delete the 0.9.0 package

    2. Unzip the corresponding service directory and copy the package to the corresponding lib directory

    2.4 Services that need to update configuration and package#

    The service that needs to update the configuration and package: linkis-metadata

          After decompressing the linkis-metadata installation package, you need to modify the configuration in the conf:

1. application.yml: modify the eureka address
2. linkis.properties: configure the Linkis database and the Hive metadata database addresses:

# Linkis database connection information
wds.linkis.server.mybatis.datasource.url=jdbc:mysql://
wds.linkis.server.mybatis.datasource.username=
wds.linkis.server.mybatis.datasource.password=
# Hive metastore address (note: this is not HiveServer2)
hive.meta.url=
hive.meta.user=
hive.meta.password=

    2.5 Newly added services#

    Newly added service: linkis-bml

    After downloading the linkis-bml installation package and decompressing it, modify the configuration in conf:

1. application.yml: modify the eureka address
2. linkis.properties: configure the MyBatis-related settings:

wds.linkis.server.mybatis.datasource.url=jdbc:mysql://
wds.linkis.server.mybatis.datasource.username=
wds.linkis.server.mybatis.datasource.password=

3. Import the BML SQL data into MySQL:

cd db/
source linkis-bml.sql
Use of 0.X SDK

// 6. Get the job information of the script
val jobInfo = client.getJobInfo(jobExecuteResult)
// 7. Get the list of result sets (if the user submits multiple SQL statements at once, multiple result sets will be generated)
val resultSetList = jobInfoResult.getResultSetList(client)
println("All result set list:")
resultSetList.foreach(println)
val oneResultSet = jobInfo.getResultSetList(client).head
// 8. Get a specific result set through the result set information
val fileContents = client.resultSet(ResultSetAction.builder().setPath(oneResultSet).setUser(jobExecuteResult.getUser).build()).getFileContent
println("First fileContents: ")
println(fileContents)
} catch {
  case e: Exception => e.printStackTrace()
}
IOUtils.closeQuietly(client)
}
Task Submission And Execution Of JDBC API

// 3. Create the statement and execute the query
Statement st = connection.createStatement();
ResultSet rs = st.executeQuery("show tables");
// 4. Process the returned results of the database (using the ResultSet class)
while (rs.next()) {
    ResultSetMetaData metaData = rs.getMetaData();
    for (int i = 1; i <= metaData.getColumnCount(); i++) {
        System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + " ");
    }
    System.out.println();
}
// close resources
rs.close();
st.close();
connection.close();
}
    Version: 1.0.2

    Linkis Task submission and execution Rest API document

• The return of the Linkis Restful interface follows this standard format:

{
  "method": "",
  "status": 0,
  "message": "",
  "data": {}
}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification
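To make the convention concrete, here is a small Scala sketch (not part of the Linkis SDK) that models the envelope and fails fast on a non-zero status:

// Minimal model of the documented return envelope; status 0 = success,
// -1 = not logged in, 1 = error, 2 = verification failed, 3 = no access
case class LinkisResponse(method: String,
                          status: Int,
                          message: String,
                          data: Map[String, Any])

def checkResponse(resp: LinkisResponse): Map[String, Any] =
  if (resp.status == 0) resp.data
  else throw new RuntimeException(
    s"Linkis call failed (status=${resp.status}): ${resp.message}")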

    1. Submit for Execution#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

{
    "executeApplicationName": "hive",  // Engine type
    "requestApplicationName": "dss",   // Client service type
    "executionCode": "show tables",
    "params": {"variable": {}, "configuration": {}},
    "runType": "hql",  // The type of script to run
    "source": {"scriptPath": "file:///tmp/hadoop/1.hql"}
}
    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

{
    "executionContent": {"code": "show tables", "runType": "sql"},
    "params": {"variable": {}, "configuration": {}},
    "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.hql"},
    "labels": {
        "engineType": "spark-2.4.3",
        "userCreator": "hadoop-IDE"
    }
}

• Return example

{
  "method": "/api/rest_j/v1/entrance/execute",
  "status": 0,
  "message": "Request executed successfully",
  "data": {
    "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",
    "taskID": "123"
  }
}
• execID is the unique execution ID generated for a task after the user submits it to Linkis. It is of type String and is only useful while the task is running, similar to the concept of a PID. The ExecID is designed as: (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

• taskID is the unique ID of the task submitted by the user. It is generated by database auto-increment and is of type Long

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

• Return example

{
  "method": "/api/rest_j/v1/entrance/{execID}/status",
  "status": 0,
  "message": "Get status successful",
  "data": {
    "execID": "${execID}",
    "status": "Running"
  }
}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

• The request parameter fromLine is the line number from which to start reading, and size is the number of log lines returned by this request

• Return example; the fromLine in the response must be passed as the fromLine parameter of the next request to this interface

{
  "method": "/api/rest_j/v1/entrance/${execID}/log",
  "status": 0,
  "message": "Return log information",
  "data": {
    "execID": "${execID}",
    "log": ["error log", "warn log", "info log", "all log"],
    "fromLine": 56
  }
}
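The sketch below shows the resulting polling step in Scala using the JDK HTTP client. The gateway address, authentication and JSON extraction are left as assumptions to be filled in with your own setup and JSON library:

import java.net.URI
import java.net.http.{HttpClient, HttpRequest, HttpResponse}

object LogPoller {
  private val client = HttpClient.newHttpClient()

  // Stub: extract data.fromLine from the JSON body with a JSON library
  private def extractFromLine(body: String): Int = ???

  // Returns the fromLine cursor to pass to the next poll
  def pollOnce(gateway: String, execId: String, fromLine: Int, size: Int): Int = {
    val uri = URI.create(
      s"$gateway/api/rest_j/v1/entrance/$execId/log?fromLine=$fromLine&size=$size")
    val request = HttpRequest.newBuilder(uri).GET().build()
    val response = client.send(request, HttpResponse.BodyHandlers.ofString())
    extractFromLine(response.body())
  }
}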

    4. Get Progress#

    • Interface /api/rest_j/v1/entrance/${execID}/progress

    • Submission method GET

• Return example

{
  "method": "/api/rest_j/v1/entrance/{execID}/progress",
  "status": 0,
  "message": "Return progress information",
  "data": {
    "execID": "${execID}",
    "progress": 0.2,
    "progressInfo": [
      {
        "id": "job-1",
        "succeedTasks": 2,
        "failedTasks": 0,
        "runningTasks": 5,
        "totalTasks": 10
      },
      {
        "id": "job-2",
        "succeedTasks": 5,
        "failedTasks": 0,
        "runningTasks": 5,
        "totalTasks": 10
      }
    ]
  }
}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

{
  "method": "/api/rest_j/v1/entrance/{execID}/kill",
  "status": 0,
  "message": "OK",
  "data": {
    "execID": "${execID}"
  }
}
    Version: 1.0.2

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/   # URL of the LDAP service
wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com     # Base DN of the LDAP service

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

wds.linkis.test.mode=true    # Enable test mode
wds.linkis.test.user=hadoop  # The user to whom all requests are delegated in test mode

3. Login Interface Summary#

    We provide the following login-related interfaces:

• Login

• Logout

• Heartbeat

    4. Interface details#

• The return of the Linkis Restful interface follows this standard format:

{
  "method": "",
  "status": 0,
  "message": "",
  "data": {}
}

    Protocol

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: returns status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returns an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

1). Login#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

{
  "userName": "",
  "password": ""
}
• Return example

{
  "method": null,
  "status": 0,
  "message": "login successful(登录成功)!",
  "data": {
    "isAdmin": false,
    "userName": ""
  }
}

Among them:

• isAdmin: Linkis has only admin and non-admin users; the only privilege of an admin user is viewing the historical tasks of all users in the Linkis management console.

2). Logout#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

• Return example

{
  "method": "/api/rest_j/v1/user/logout",
  "status": 0,
  "message": "Logout successful(退出登录成功)!"
}

3). Heartbeat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

• Return example

{
  "method": "/api/rest_j/v1/user/heartbeat",
  "status": 0,
  "message": "Maintain heartbeat success(维系心跳成功)!"
}
    Version: 1.0.2

    Overview

    1. Document description#

Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it remains compatible with the 0.x interfaces. However, to avoid compatibility problems when using version 1.0, please read the following documents carefully:

    1. When using Linkis1.0 for customized development, you need to use Linkis's authorization authentication interface. Please read Login API Document carefully.

2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis via JDBC, please read Task Submit and Execute JDBC API Document.

3. Linkis 1.0 provides a Rest interface. If you need to develop upper-layer applications on top of Linkis, please read Task Submit and Execute Rest API Document.

    Version: 1.0.2

    How to add an EngineConn

Adding an EngineConn is one of the core processes of the computing-task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks EngineConnManager to start the EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

Process of adding an EngineConn

1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager , which can support multi-active deployment and have the characteristics of high availability and easy expansion.

After the AM module receives the client's new-EngineConn request, it first checks the validity of the request parameters. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

After the AM module receives the engine creation request, it checks the parameters. First it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used later in AM's creation process to find the ECM and to record resource information, you must ensure the necessary Labels are present. At this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).

2. Select an EngineConnManager (ECM)#

ECM selection uses the Labels passed by the client to pick a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs by the client's Labels and returns them ordered by label-matching degree. After obtaining the registered ECM list, these ECMs are filtered by selection rules; at this stage, rules such as availability checks, resource surplus and machine load have been implemented. After filtering, the ECM with the best label match, the most idle resources and the lowest load is returned (see the sketch below).
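A simplified Scala sketch of this ranking, with a hypothetical EcmNode shape standing in for the real ECM metadata:

// Best label match first, then most idle resources, then lowest load
case class EcmNode(instance: String, labelMatches: Int, idleMemoryMb: Long, load: Double)

def selectEcm(candidates: Seq[EcmNode]): Option[EcmNode] =
  candidates
    .sortBy(n => (-n.labelMatches, -n.idleMemoryMb, n.load))
    .headOption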

    3. Apply resources required for EngineConn#

1. After obtaining the assigned ECM, AM calls the EngineConnPluginServer service to determine how many resources the client's engine-creation request will need. The resource request is encapsulated here, mainly including the Labels, the EngineConn startup parameters passed by the client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it will first find the corresponding engine tag through the passed tag, and select the EngineConnPlugin of the corresponding engine through the engine tag. Then use EngineConnPlugin's resource generator to calculate the engine startup parameters passed in by the client, calculate the resources required to apply for a new EngineConn this time, and then return it to LinkisManager.

      Glossary:

• EngineConnPlugin: the interface that must be implemented when Linkis connects a new computing storage engine. This interface covers several capabilities the EngineConn needs during startup, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connection device. Please refer to the Spark engine implementation class for a concrete example: SparkEngineConnPlugin.
    • EngineConnPluginServer: It is a microservice that loads all the EngineConnPlugins and provides externally the required resource generation capabilities of EngineConn and EngineConn's startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
3. After AM obtains the engine resources, it calls the RM service to apply for them. The RM service uses the incoming Labels, the ECM, and the resources requested this time to make a judgment: first whether the resources of the client corresponding to the Labels are sufficient, then whether the resources of the ECM service are sufficient. If both are sufficient, the resource application is approved and the resources of the corresponding Labels are adjusted accordingly (a simplified sketch follows).
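A minimal sketch of this two-level check, with resources collapsed to a single number and hypothetical names:

// Both the label's quota and the ECM's capacity must cover the request
// before the usage counters are adjusted.
case class Pool(var used: Long, limit: Long) {
  def canFit(req: Long): Boolean = used + req <= limit
}

def tryAcquire(labelPool: Pool, ecmPool: Pool, request: Long): Boolean =
  if (labelPool.canFit(request) && ecmPool.canFit(request)) {
    labelPool.used += request
    ecmPool.used += request
    true // approved; counters of both levels updated
  } else false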

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually needs to be started through the label information of EngineConnBuildRequest, get the EngineConnPlugin of the EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix systems, that is, only supports Linux systems to execute the startup script.

Before startup, the sudo command is used to switch to the requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the client side.

After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. As soon as the exit status becomes non-zero, it immediately reports an EngineConn startup failure to LinkisManager and the entire process ends; otherwise, it keeps monitoring the log and status of the startup script until the script execution is complete.

    3. EngineConn initialization#

After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

1. Initialize the EngineConn of the specific engine. First use the command-line parameters of the Java main method to encapsulate an EngineCreationContext containing the relevant label, startup and parameter information, and initialize EngineConn through the EngineCreationContext to complete the connection between EngineConn and the underlying engine; for example, SparkEngineConn initializes a SparkSession at this stage to establish the connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor will be initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario will initialize a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code capabilities, and support the Client to submit and execute SQL, PySpark, Scala and other codes to the SparkEngineConn.
3. Report the heartbeat to LinkisManager regularly, and wait for EngineConn to exit. When the underlying engine corresponding to the EngineConn becomes abnormal, the maximum idle time is exceeded, the Executor finishes executing, or the user manually kills it, the EngineConn automatically ends and exits.

At this point, the process of adding a new EngineConn is basically over. Finally, let's summarize:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    Version: 1.0.2

    Message Scheduler Module

1. Overview#

Linkis-RPC enables communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies and invokes methods annotated with @Receiver. At the same time, it unifies the use of the RPC and Restful interfaces, which gives better scalability.
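A Scala sketch of this programming model, using stand-in annotation and protocol types rather than the exact Linkis classes:

import scala.annotation.StaticAnnotation

// Stand-in for the @Receiver annotation described above
class Receiver extends StaticAnnotation

trait RequestProtocol
case class EngineAskRequest(user: String) extends RequestProtocol
case class EngineAskResponse(instance: String)

// ServiceParser would discover this method and invoke it when a matching
// RequestProtocol arrives, whether over RPC or a Restful call.
class EngineAskService {
  @Receiver
  def dealEngineAsk(request: EngineAskRequest): EngineAskResponse =
    EngineAskResponse(s"engine-for-${request.user}")
}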

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parse the (Object) object of the Service module, and encapsulate the @Receiver annotated method into the ServiceMethod object.
    • ServiceRegistry: Register the corresponding Service module, and store the ServiceMethod parsed by the Service in the Map container.
    • ImplicitParser: parse the object of the Implicit module, and the method annotated with @Implicit will be encapsulated into the ImplicitMethod object.
    • ImplicitRegistry: Register the corresponding Implicit module, and store the resolved ImplicitMethod in a Map container.
• Converter: scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: Realize the publishing scheduling function, find the ServiceMethod matching the RequestProtocol in the Registry, and encapsulate it as a Job for submission scheduling.
• Scheduler: the scheduling implementation, which uses Linkis-Scheduler to execute the job and returns the MessageJob object.
    • TxManager: Complete transaction management, perform transaction management on job execution, and judge whether to commit or rollback after the job execution ends.
RPC Module

At the same time, because Feign only supports simple service selection rules, it can neither forward a request to a specified microservice instance nor broadcast a request to all instances of the receiving microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

• Eureka: the service registry, used for managing services and service discovery.
    • Sender: Service request interface, the sender uses Sender to request service from the receiver.
    • Receiver: The service request receives the corresponding interface, and the receiver responds to the service through this interface.
• Interceptor: the Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts the request, the retry interceptor retries failed requests, the cache interceptor caches and reads simple, unchanging requests, and the default interceptor provides the default implementation.
• Decoder, Encoder: used for request encoding and decoding.
• Feign: a lightweight framework for HTTP request calls, a declarative web service client, used for the underlying communication of Linkis-RPC.
• Listener: the monitoring module, mainly used to listen for broadcast requests.
    Version: 1.0.2

    EngineConn architecture design

EngineConn: the engine connector, responsible for creating and holding the connection session between Linkis and the underlying computing storage engines, acting as the client that communicates with a specific engine.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    The ability to provide interactive computing tasks.

Core class | Core function
EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session Session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

Core service | Core function
EngineCreationContext | Contains the context information of EngineConn during startup
EngineConn | Contains the specific information of EngineConn, such as its type and the connection information with the underlying computing storage engine
EngineExecution | Provides the Executor creation logic
EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

Core class | Core function
EngineConnManager | Provides interfaces for creating and obtaining EngineConn
ExecutorManager | Provides interfaces for creating and obtaining Executor
ShutdownHook | Defines the operations of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

Core class | Core function
EngineConnServer | Startup class of the EngineConn microservice

    The core logic of the linkis-executor-core executor#

Defines the core classes related to the Executor. The Executor is the real executor of the computing scenario and the unit that actually runs the user code submitted to EngineConn.

Core class | Core function
Executor | The actual computational logic execution unit; provides a top-level abstraction of the engine's various capabilities
EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
EngineConnSyncEvent | Defines EngineConn-related synchronous events
EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
EngineConnSyncListener | Defines the EngineConn-related synchronous event listener
EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronous events
ExecutorListenerBusContext | Defines the context of the EngineConn event listeners
LabelService | Provides the label reporting function
ManagerService | Provides information transfer with LinkisManager

    linkis-callback-service callback logic#

Core Class | Core Function
EngineConnCallback | Defines EngineConn's callback logic

linkis-accessible-executor accessible executor#

    Executor that can be accessed. You can interact with it through RPC requests to get its status, load, concurrency and other basic indicators Metrics data.

Core Class | Core Function
LogCache | Provides the log cache function
AccessibleExecutor | An Executor that can be accessed and interacted with through RPC requests
NodeHealthyInfoManager | Manages the Executor's health information
NodeHeartbeatMsgManager | Manages the Executor's heartbeat information
NodeOverLoadInfoManager | Manages the Executor's load information
Listener | Provides Executor-related events and the corresponding listener definitions
EngineConnTimedLock | Defines the Executor-level lock
AccessibleService | Provides start-stop and status acquisition for the Executor
ExecutorHeartbeatService | Provides the Executor's heartbeat-related functions
LockService | Provides the lock management function
LogService | Provides log management functions
    LogServiceProvide log management functions
EngineConnManager Design

The core service and feature modules are as follows:

Core service | Core function
EngineConnLaunchService | Contains the core methods for generating an EngineConn and starting its process
BmlResourceLocallizationService | Downloads BML engine-related resources and generates the localized file directory
ECMHealthService | Reports its own health heartbeat to AM regularly
ECMMetricsService | Reports its own metrics to AM regularly
EngineConnKillSerivce | Provides functions for stopping the engine
EngineConnListService | Provides caching and management of engines
EngineConnCallBackService | Provides the engine callback function
EngineConnPlugin (ECP) Design

Other services such as Manager invoke the logic of the corresponding plug-in in the Plugin Server through RPC requests.

Core Class | Core Function
EngineConnLaunchService | Responsible for building the engine connector launch request
EngineConnResourceFactoryService | Responsible for generating engine resources
EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to request parameters, and it has caching characteristics. The loading process consists of two parts: 1) plug-in resources such as the main program package and program dependency packages are loaded locally (not open). 2) Plug-in resources are dynamically loaded from the local file system into the service process environment, for example, loaded into the JVM virtual machine through a class loader (see the generic sketch below).
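As a generic JVM illustration of step 2), dynamic loading through a dedicated class loader can be sketched as follows; the jar path and class name are placeholders, and Linkis's own EngineConnPluginClassLoader adds caching and isolation on top of this idea:

import java.net.{URL, URLClassLoader}

object PluginLoaderSketch {
  // Loads a plug-in jar into the running process through its own class loader
  // and instantiates the entry class reflectively.
  def loadPlugin(jarPath: String, className: String): Any = {
    val loader = new URLClassLoader(Array(new URL(s"file:$jarPath")),
                                    getClass.getClassLoader)
    loader.loadClass(className).getDeclaredConstructor().newInstance()
  }
}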

Core Class | Core Function
EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
EngineConnPluginsLoader | Loads an engine connector plug-in instance, or loads an existing one from the cache
EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

The engine connector plug-in cache is a cache service dedicated to caching loaded engine connectors, supporting read, update and remove operations. A plug-in loaded into the service process is cached together with its class loader to prevent repeated loading from hurting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources, and if changes are found, the plug-in is reloaded and the cache refreshed automatically.

Core Class | Core Function
EngineConnPluginCache | Caches loaded engine connector instances
RefreshPluginCacheContainer | Engine connector that refreshes the cache periodically

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

Core Class | Core Function
EngineConnLaunchBuilder | Builds the engine connector launch request
EngineConnFactory | Creates the engine connector
EngineConnPlugin | The engine connector plug-in interface, including resource, command and instance construction methods
EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library that has been implemented based on the plug-in interface defined by us. Provides the default engine connector implementation, such as jdbc, spark, python, shell, etc. Users can refer to the implemented cases based on their own needs to implement more engine connectors.

Core Class | Core Function
engineplugin-jdbc | jdbc engine connector
engineplugin-shell | shell engine connector
engineplugin-spark | spark engine connector
engineplugin-python | python engine connector
    Version: 1.0.2

    Entrance Architecture Design

The Linkis task submission portal receives, schedules and forwards execution requests, provides life-cycle management for computing tasks, and can return calculation results, logs and progress to the caller. It is split out from the native capabilities of Entrance in Linkis 0.X.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

Core Class | Core Function
EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task to make it more complete; the supplementary information includes database information, custom variable replacement, code inspection, limit restrictions, etc.
EntranceParser | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task
EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
PersistenceManager | Responsible for job-related persistence operations, such as storing result set paths, job status changes and progress in the database
ResultSetEngine | Responsible for storing the result set after the job runs, saved as files to HDFS or a local storage directory
LogManager | Responsible for storing job logs and managing log error codes
Scheduler | The job scheduler, responsible for scheduling and executing all jobs, mainly through the scheduling job queues
    Version: 1.0.2

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#


    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

Core Class | Core Function
Action | Defines the attributes of a request and the methods and parameters it includes
Result | Defines the properties of a returned result and the methods and parameters it includes
UJESClient | Responsible for request submission and execution, and for obtaining status, results and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

Core Class | Core Function
Common | Defines the instruction template parent class and interface, the instruction analysis entity class, and the task submission and execution links
Core | Responsible for parsing input, executing tasks and defining output methods
Application | Calls linkis-computation-client to execute tasks, and pulls logs and final results in real time
App Manager

Engine manager: responsible for managing the basic information and metadata of all engines.

Label Manager

We stipulate that the higher the proportion of candidate nodes associated with irrelevant labels among all associated nodes, the more significant the impact on the score, further accumulating on the node's initial score obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
LinkisManager Overview

ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

Resource Manager

The information of each external resource provider (url, Hadoop version and other information) is maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to Labels into the corresponding Labels, and converts the information that can serve as parameters for requesting the resource interface into params. Finally, an ExternalResourceProvider instance that can be used as the basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.



    Version: 1.0.2

    Job submission, preparation and execution process

The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It runs through almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

The whole process, starting with the submission of the user's computing task from the client and ending with the return of the final result, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

• Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager:Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
  2. AppManager: Coordinates and manages all EngineConnManagers and EngineConns, handing the life cycle of an EngineConn (application, reuse, creation, switching and destruction) over to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, it will provide label support for the routing and management capabilities of EngineConn and EngineConnManager across IDC and across clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name as entrance and forwards the Job to the Entrance microservice. It should be noted that if the user specifies a routing label, the Entrance microservice instance with the corresponding label will be selected for forwarding according to the routing label instead of random forwarding.
3. After Entrance receives the Job request, it first briefly verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumption thread.
4. The scheduling queue opens up a consumption queue and a consumption thread for each group. The consumption queue stores the computing tasks that have been preliminarily encapsulated, and the consumption thread keeps taking computing tasks from the consumption queue for consumption in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even for the same user, as long as the computing tasks are submitted by different systems, the actual consumption queues and consumption threads are completely different and completely isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
5. After the consumption thread takes out a computing task, it submits the task to Orchestrator, which officially enters the preparation phase.
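As a minimal client-side sketch of step 1, the request above can be sent with Python's requests library. The gateway address and the authentication flow are assumptions here (login is omitted); this is illustrative, not the official SDK.

```python
import requests

GATEWAY = "http://127.0.0.1:9001"  # assumed Linkis Gateway address; adjust to your deployment

def submit_job(session, code, engine_type="spark-2.4.3", user_creator="username-IDE"):
    """Submit a computing task to Entrance via Linkis Gateway (illustrative)."""
    payload = {
        "executionContent": {"code": code, "runType": "sql"},
        "params": {"variable": {}, "configuration": {}},  # not required
        "source": {"scriptPath": "file:///1.hql"},        # only records the code source
        "labels": {
            "engineType": engine_type,    # routes the job to a matching EngineConn
            "userCreator": user_creator,  # submission user and submission system
        },
    }
    resp = session.post(GATEWAY + "/api/rest_j/v1/entrance/submit", json=payload)
    resp.raise_for_status()
    return resp.json()  # the response carries the task ID used for status polling

# session = requests.Session()  # an already-authenticated session (login flow omitted)
# print(submit_job(session, "show tables"))
```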

    2. Preparation Stage#

There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn, to which the computing task will later be submitted for execution. The other is for Orchestrator to orchestrate the computing task submitted by Entrance, converting the user's computing request into a physical execution tree that is handed over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

What defines a reusable EngineConn? It refers to an EngineConn that can match all the label requirements of the computing task and whose own health status is Healthy (the load is low and the actual status is Idle). All the EngineConns that meet these conditions are then sorted and selected according to the rules, and finally the best one is locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
• Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree (a sketch of this pipeline follows the list).
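To make the pipeline above concrete, here is an illustrative Python sketch of a JobReq flowing through the five transformation stages in order. This is not Linkis's actual implementation; all data shapes and function bodies below are hypothetical stand-ins.

```python
from dataclasses import dataclass, field

@dataclass
class JobReq:                     # hypothetical stand-in for the task request
    code: str
    labels: dict = field(default_factory=dict)

def converter(job_req):           # JobReq -> ASTJob: check params, replace variables
    return {"code": job_req.code, "labels": dict(job_req.labels)}

def parser(ast_job):              # ASTJob -> AST tree of ASTJob + ASTStages
    return {"job": ast_job, "stages": [{"code": ast_job["code"]}]}

def validator(ast_tree):          # inspect code, supplement necessary Labels
    ast_tree["job"]["labels"].setdefault("engineType", "spark-2.4.3")
    return ast_tree

def planner(ast_tree):            # AST tree -> Logical tree of LogicalTasks
    return [("LogicalTask", stage["code"]) for stage in ast_tree["stages"]]

def optimizer(logical_tree):      # Logical tree -> optimized Physical tree
    return [("ExecTask", code) for _, code in logical_tree]

def orchestrate(job_req):         # the five stages applied in order
    return optimizer(planner(validator(parser(converter(job_req)))))

print(orchestrate(JobReq("show tables")))
```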

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy; different computing strategies encapsulate different execution logics.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

In the multi-reading scenario, only one ExecTask needs to return a result. Once that result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails to execute, the entire Physical tree would be marked as failed. StageExecTask(End) is therefore needed to ensure that the Physical tree can both cancel the ExecTask that failed to execute and continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue to execute. This is the computing strategy execution logic represented by StageExecTask.

The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, to serve users' different computing governance needs, whereas a SQL parsing engine is a parsing and orchestration engine oriented to the SQL language. Here is a simple distinction:

1. What Linkis Orchestrator mainly solves is the orchestration requirements that different computing strategies impose on computing tasks. For example, to be multi-active, Orchestrator will, for a calculation task submitted by a user, compile a Physical tree based on the "multi-active" computing strategy requirements, so as to submit this calculation task to multiple clusters for execution. In the process of constructing the entire Physical tree, various possible abnormal scenarios have been fully considered and are all reflected in the Physical tree.
2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated, while a SQL parsing engine only cares about the analysis and execution of SQL and is only responsible for parsing a piece of SQL into an executable Physical tree and finally computing the result.
3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL, supporting the splitting of a user SQL that spans multiple computing engines (they must be computing engines that Linkis has integrated with) into multiple sub-SQLs, submitting them to each corresponding engine during the execution phase, and finally selecting a suitable computing engine for summary calculation.

After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator will submit this Physical tree to its Execution module to enter the final execution stage.

    3. Execution Stage#

The execution stage is mainly divided into the following two steps, which are the last two capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
• Reheater: Once the execution of a node in the Physical tree is completed, it triggers a reheat. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example: if it is detected that a leaf node failed to execute and it supports retry (i.e., the failure was caused by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node (sketched below).
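The Reheater behaviour can be pictured with a small sketch: on a retryable failure, a retry parent with the same content is spliced above the failed leaf. This is illustrative Python, not the actual Orchestrator code; RetryException and the node layout are assumptions.

```python
class RetryException(Exception):
    """Assumed analogue of Orchestrator's ReTryExecption."""

class Node:
    def __init__(self, name, run, children=()):
        self.name, self.run, self.children = name, run, list(children)

def reheat(parent, index):
    """Dynamically adjust the tree: wrap the failed leaf in a retry parent
    node with exactly the same content."""
    failed = parent.children[index]
    parent.children[index] = Node("retry(" + failed.name + ")", failed.run, [failed])

def execute_children(parent):
    i = 0
    while i < len(parent.children):
        try:
            parent.children[i].run()
            i += 1
        except RetryException:
            if parent.children[i].name.startswith("retry("):
                raise          # already retried once; let the tree fail
            reheat(parent, i)  # the retry parent runs on the next loop pass
```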

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
3. After ExecTask gets this execution ID, it can then use this ID to asynchronously pull the execution information of the computing task (such as status, progress, logs, result sets, etc.); this asynchronous pattern is sketched after this list.
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
5. EngineConn will report the execution status in real time to the microservice where Orchestrator is located through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
7. The result sets generated by the calculation task are written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set paths through RPC; Execution consumes the event and broadcasts the obtained result set paths through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the paths and persist them to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
9. After the Listener registered by Entrance with Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is complete.
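Steps 2 and 3 hinge on EngineConn returning an execution ID immediately while the real work runs asynchronously in a thread pool. Here is a minimal Python sketch of that pattern; the class and method names are illustrative, not Linkis APIs.

```python
import uuid
from concurrent.futures import ThreadPoolExecutor

class MiniEngineConn:
    """Illustrative only: accept a task, run it asynchronously in a thread
    pool, and hand back an execution ID immediately (steps 2 and 3 above)."""

    def __init__(self):
        self.pool = ThreadPoolExecutor(max_workers=4)
        self.tasks = {}  # execution ID -> Future

    def submit(self, code):
        exec_id = str(uuid.uuid4())
        self.tasks[exec_id] = self.pool.submit(self._run_on_engine, code)
        return exec_id  # returned at once, before the task finishes

    def status(self, exec_id):  # what ExecTask polls asynchronously
        return "Succeed" if self.tasks[exec_id].done() else "Running"

    @staticmethod
    def _run_on_engine(code):
        return "result of " + repr(code)  # stands in for the real engine call

conn = MiniEngineConn()
eid = conn.submit("show tables")
print(eid, conn.status(eid))
```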

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
    2. Once the status is flipped to success, it sends a request for job information to JobHistory, and gets all the result set paths.
3. Initiate a query file content request to PublicService through the result set path, and obtain the content of the result set (the whole loop is sketched below).
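A hedged Python sketch of this client-side loop follows. The exact endpoints beyond the submit path and the response field names vary by version, so treat the URLs and keys here as assumptions and consult the REST API documentation for your release.

```python
import time
import requests

GATEWAY = "http://127.0.0.1:9001"  # assumed gateway address

def wait_for_result(session, task_id):
    # 1. Periodically poll Entrance for the status of the computing task.
    while True:
        status = session.get(
            GATEWAY + "/api/rest_j/v1/entrance/" + task_id + "/status").json()
        if status["data"]["status"] in ("Succeed", "Failed", "Cancelled"):
            break
        time.sleep(2)
    # 2. Once the status flips to success, ask JobHistory for the job
    #    information, which carries the result set location.
    job = session.get(
        GATEWAY + "/api/rest_j/v1/jobhistory/" + task_id + "/get").json()
    result_location = job["data"]["task"]["resultLocation"]
    # 3. Ask PublicService to open the result set content by its path.
    return session.get(
        GATEWAY + "/api/rest_j/v1/filesystem/openFile",
        params={"path": result_location + "/_0.dolphin"}).json()
```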

At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html b/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html index 5019a12bfc2..c6900209ee5 100644 --- a/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/microservice_governance_services/overview/index.html b/docs/1.0.2/architecture/microservice_governance_services/overview/index.html index 0c66a049084..f53ad2deef2 100644 --- a/docs/1.0.2/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.0.2/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -30,7 +30,7 @@ As the request receiver, the Receiver will be provided to process the request sent by the Sender in order to complete the synchronous response or asynchronous response.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/overview/index.html b/docs/1.0.2/architecture/overview/index.html index 06dfb267d08..832637da683 100644 --- a/docs/1.0.2/architecture/overview/index.html +++ b/docs/1.0.2/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X already provided.
    2. The microservice governance services are Spring Cloud Gateway, Eureka and Open Feign already provided by Linkis 0.X, and Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0; across the three overall stages of submission, preparation and execution, they comprehensively upgrade Linkis's ability to control user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

1. For the characteristics of Linkis1.0's architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. For Linkis1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. For Linkis1.0 microservice governance related documents, please read Microservice Governance.
    4. For Linkis1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/bml/index.html b/docs/1.0.2/architecture/public_enhancement_services/bml/index.html index d734a75ace5..d5fc7019091 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/bml/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  • Database Design#

1. Resource information table (resource)

| Field name | Function | Remarks |
| --- | --- | --- |
| resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification |
| resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/ |
| owner | The owner of the resource | e.g. zhangsan |
| create_time | Record creation time | |
| is_share | Whether to share | 0 means not to share, 1 means to share |
| update_time | Last update time of the resource | |
| is_expire | Whether the record resource expires | |
| expire_time | Record resource expiration time | |

2. Resource version information table (resource_version)

| Field name | Function | Remarks |
| --- | --- | --- |
| resource_id | Uniquely identifies the resource | Joint primary key |
| version | The version of the resource file | |
| start_byte | Start byte count of the resource file | |
| end_byte | End byte count of the resource file | |
| size | Resource file size | |
| resource_location | Resource file placement location | |
| start_time | Record upload start time | |
| end_time | Record upload end time | |
| updater | Record update user | |

3. Resource download history table (resource_download_history)

| Field | Function | Remarks |
| --- | --- | --- |
| resource_id | Record the resource_id of the downloaded resource | |
| version | Record the version of the downloaded resource | |
| downloader | Record the user who downloaded | |
| start_time | Record download start time | |
| end_time | Record download end time | |
| status | Whether the record is successful | 0 means success, 1 means failure |
| err_msg | Log failure reason | null means success, otherwise logs the failure reason |
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html index 4dc2d0db12c..adea9e62c00 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 6f4db75eb28..bc959554727 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

The specific entity bean of ContextValue needs to use the annotation @keywordMethod on the corresponding get method that can be used as a keyword. For example, the getTableName method of Table must be annotated with @keywordMethod.

When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are annotated with @keywordMethod, calls each get method, takes the toString of the returned object, parses it through user-selectable rules (separators and regular expressions), and stores the results in the keyword collection (a rough analogue is sketched after the precautions below).

    Precautions:

1. The annotation is defined in the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
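The mechanism itself is Java annotation scanning; as a rough Python analogue, a decorator can stand in for the @keywordMethod annotation, with the parser collecting the toString of every marked no-argument getter and splitting it by a separator/regex rule. All names below are illustrative.

```python
import re

def keyword_method(fn):
    fn._is_keyword_method = True  # stands in for the @keywordMethod annotation
    return fn

class Table:
    def __init__(self, table_name):
        self.table_name = table_name

    @keyword_method
    def get_table_name(self):  # a no-argument getter, as the precautions require
        return self.table_name

def parse_keywords(obj, split_pattern=r"[,;\s]+"):
    """Scan all marked getters of the object, call each one, and split the
    toString of the returned value into keywords using a separator/regex rule."""
    keywords = set()
    for name in dir(obj):
        method = getattr(obj, name)
        if callable(method) and getattr(method, "_is_keyword_method", False):
            keywords.update(re.split(split_pattern, str(method())))
    return keywords

print(parse_keywords(Table("db.sales")))  # -> {'db.sales'}
```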

- + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html index 87fe412f445..3bacd91efa5 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid; the parsing method is to obtain the information of each instance through string cutting, and then use Eureka to determine, through the instance information, whether this microservice still exists. If it exists, the request is sent to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 3e5bed31c52..22dc92fa2eb 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

When the client sends a change request, the Gateway determines that the main Instance is invalid and forwards the request to the standby Instance for processing. After the standby Instance verifies that the HAID is valid, it loads the context instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 23bd43e3bc7..cb8b4bb4743 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    CS Listener Architecture

    Listener Architecture#

In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We use the listener mode to achieve this, and use a heartbeat mechanism to poll and maintain the metadata consistency of the context information.

    Client registration itself, CSKey registration and CSKey update process#

    The main process is as follows:

1. Registration operation: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the csserver through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

ContextKeyCallbackEngine returns the updated CSKey values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, the client is removed.
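An illustrative Python sketch of this register/update/heartbeat cycle follows; the class, method names, and the timeout value are hypothetical, not the CS server's actual API.

```python
import time

class CallbackEngine:
    """Rough analogue of ContextKeyCallbackEngine: track clients, their
    watched CSKeys, pending updates, and heartbeat times."""
    HEARTBEAT_TIMEOUT = 30  # seconds; assumed value

    def __init__(self):
        self.registry = {}   # client -> set of watched CSKeys
        self.pending = {}    # client -> {cskey: new_value} not yet delivered
        self.last_beat = {}  # client -> last heartbeat timestamp

    def register(self, client, cskeys):
        self.registry[client] = set(cskeys)
        self.pending[client] = {}
        self.last_beat[client] = time.time()

    def on_update(self, cskey, value):  # delivered via the ListenerBus
        for client, keys in self.registry.items():
            if cskey in keys:
                self.pending[client][cskey] = value

    def heartbeat(self, client):
        """Client polls: return (and clear) its pending updates, and evict
        any client whose heartbeat has timed out."""
        now = time.time()
        self.last_beat[client] = now
        for stale in [c for c, t in self.last_beat.items()
                      if now - t > self.HEARTBEAT_TIMEOUT]:
            self.registry.pop(stale, None)
            self.pending.pop(stale, None)
            self.last_beat.pop(stale, None)
        updates, self.pending[client] = self.pending[client], {}
        return updates
```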

Listener UML class diagram#

    Interface: ListenerManager

    External: Provide ListenerBus for event delivery.

Internal: Provide a callback engine for specific event registration, access, update, and heartbeat processing logic.

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 7bbcbbe77dd..16cca30e9ad 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html index 345e971e314..1d136a55bfd 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

3. Execution module: Filter out the results that match the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. The query conditions can be used to form more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

3. Support the logical operations or, and, and not

  1. Unary operation UnaryContextSearchCondition: supports logical operations on a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition: supports logical operations on two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

      3. Each logical operation corresponds to an implementation class of the above subclasses

    The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

2. Support logical operations between Conditions, returning new Conditions: And, Or and Not (considering the form condition1.or(condition2), the top-level Condition interface is required to define logical operation methods; this composable form is sketched after this list)

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
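An illustrative Python sketch of the composable-condition idea described above; the class names mirror the document, but the implementation is hypothetical (and `and_`/`or_`/`not_` are used because `and`, `or`, `not` are Python keywords).

```python
class Condition:
    def matches(self, kv):            # Matcher role: test one ContextKeyValue
        raise NotImplementedError
    def and_(self, other):
        return AndCondition(self, other)
    def or_(self, other):
        return OrCondition(self, other)
    def not_(self):
        return NotCondition(self)

class ContainsCondition(Condition):   # cf. ContainsContextSearchCondition
    def __init__(self, text): self.text = text
    def matches(self, kv): return self.text in kv["key"]

class AndCondition(Condition):        # left/right, cf. BinaryContextSearchCondition
    def __init__(self, left, right): self.left, self.right = left, right
    def matches(self, kv): return self.left.matches(kv) and self.right.matches(kv)

class OrCondition(Condition):
    def __init__(self, left, right): self.left, self.right = left, right
    def matches(self, kv): return self.left.matches(kv) or self.right.matches(kv)

class NotCondition(Condition):        # cf. UnaryContextSearchCondition
    def __init__(self, inner): self.inner = inner
    def matches(self, kv): return not self.inner.matches(kv)

cond = ContainsCondition("table").or_(ContainsCondition("result")).not_()
print(cond.matches({"key": "flow.table.sales"}))  # False: the key contains "table"
```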

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

2. Implement a CostContextSearchOptimizer, whose optimize method optimizes a Condition based on its cost and converts it into an OptimizedContextSearchCondition object. The specific logic is as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutative rules of logical operations, the left-right order of a node's children in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

Tree B as shown in the figure below is another possible ordering of tree A above; its execution result is exactly the same as tree A's, only the execution order of the parts has been adjusted.

    (Tree B)
3. For each tree, the cost is calculated from the leaf nodes and aggregated up to the root node, which gives the final cost of the tree; finally, the tree with the smallest cost is chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 on the left and 0.5 on the right (how to adjust this will be considered later). The reason for assigning weights is that in some cases the condition on the left can already determine whether the entire combined logic matches, so the condition on the right does not have to be executed in all cases, and its actual cost needs to be reduced by a certain percentage.

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

Taking tree A and tree B as examples, calculate the costs of these two trees respectively, as shown in the figure below; the number in each node is Cost|Weight. Assuming that the costs of the five simple conditions A-E are 10, 100, 50, 10, and 100, it can be concluded that the cost of tree B is less than that of tree A, so tree B is the better plan (a small sketch of this calculation appears at the end of this section).

4. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted as the implementation matures)

2. According to the efficiency of historical queries, the real-time Cost is obtained through continuous adjustment on the basis of the initial Cost, measured as throughput per unit time.
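A small Python sketch of the cost rule above: leaves carry a condition's own Cost, and a non-leaf node costs the weighted sum of its children (weight 1 on the left, 0.5 on the right). The two candidate tree shapes below are hypothetical, since the actual trees A and B are only shown in the figures; only the example costs (A-E = 10, 100, 50, 10, 100) come from the text.

```python
def tree_cost(node):
    """Leaf: the condition's own Cost. Non-leaf: sum of cost(child) * weight(child),
    with weight 1 for the left child and 0.5 for the right child."""
    if isinstance(node, (int, float)):  # a simple condition's cost
        return node
    left, right = node                  # a binary logical operation node
    return tree_cost(left) * 1.0 + tree_cost(right) * 0.5

A, B, C, D, E = 10, 100, 50, 10, 100    # the example costs from the text
candidate_1 = ((A, B), (C, (D, E)))     # one possible execution order
candidate_2 = ((A, (D, C)), (B, E))     # the same conditions, children reordered
print(tree_cost(candidate_1), tree_cost(candidate_2))  # keep the cheaper tree
```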

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html index 71a9a33d99a..a4c22e06f4d 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/overview/index.html b/docs/1.0.2/architecture/public_enhancement_services/overview/index.html index cd0a36fef46..6ea04c8cac7 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

PublicEnhancementService (PS) architecture design

PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for other microservice modules.

    Introduction to the second-level module:

    BML material library#

It is the Linkis material management system, mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc.; it can also store the class libraries that an engine needs at runtime.

| Core Class | Core Function |
| --- | --- |
| UploadService | Provide resource upload service |
| DownloadService | Provide resource download service |
| ResourceManager | Provides a unified management entry for uploading and downloading resources |
| VersionManager | Provides resource version marking and version management functions |
| ProjectManager | Provides project-level resource management and control capabilities |

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

| Core Class | Core Function |
| --- | --- |
| CategoryService | Provides management services for application and engine catalogs |
| ConfigurationService | Provides a unified management service for user configuration |

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

| Core Class | Core Function |
| --- | --- |
| ContextCacheService | Provides a cache service for context information |
| ContextClient | Provides the ability for other microservices to interact with the CSServer group |
| ContextHAManager | Provides high-availability capabilities for ContextService |
| ListenerManager | Provides a message-bus capability |
| ContextSearch | Provides the query entry |
| ContextService | Implements the overall execution logic of the context service |

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

| Core Class | Core Function |
| --- | --- |
| datasource-server | Provide the ability to connect to different data sources |

    InstanceLabel microservice management#

InstanceLabel provides registration and labeling functions for other microservices connected to Linkis.

| Core Class | Core Function |
| --- | --- |
| InsLabelService | Provides microservice registration and label management functions |

    Jobhistory historical task management#

Jobhistory provides users with functions for querying Linkis historical tasks and viewing their progress and logs, and provides a unified historical task view for administrators.

| Core Class | Core Function |
| --- | --- |
| JobHistoryQueryService | Provide historical task query service |

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

| Core Class | Core Function |
| --- | --- |
| VariableService | Provides functions related to the storage and use of custom variables |

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

| Core Class | Core Function |
| --- | --- |
| UDFService | Provide user-defined function service |
    - + \ No newline at end of file diff --git a/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html b/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html index 32617eacac6..f85c7eb72c7 100644 --- a/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

• Provides the label analysis module, which can parse a user's request into a set of labels.

• Provides node label management capabilities, mainly the CRUD capability for node labels and label resource management to manage the resources of certain labels, recording the maximum, minimum and used resources of a label.

    - + \ No newline at end of file diff --git a/docs/1.0.2/contact/index.html b/docs/1.0.2/contact/index.html index f71ca406f03..8ba0b51b6a8 100644 --- a/docs/1.0.2/contact/index.html +++ b/docs/1.0.2/contact/index.html @@ -7,7 +7,7 @@ Contact Us | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/cluster_deployment/index.html b/docs/1.0.2/deployment/cluster_deployment/index.html index 7ce5ab53003..ee8b88342fb 100644 --- a/docs/1.0.2/deployment/cluster_deployment/index.html +++ b/docs/1.0.2/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ Replicas will also display the replica nodes adjacent to the cluster.

    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html b/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html index 5fe8ff6dbcd..1cc406796f7 100644 --- a/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, such as: Place it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

The configuration of the Linkis1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into four tables:

linkis_configuration_config_key: Insert the keys and default values of the engine's configuration parameters
    linkis_manager_label: Insert the engine label, such as hive-1.2.1
    linkis_configuration_category: Insert the catalog relationship of the engine
    linkis_configuration_config_value: Insert the configuration that the engine needs to display

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file for execution

    2.3 Engine refresh#

1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis1.0 provides a way to load the engine without restarting the server: send a request to the linkis-engineconn-plugin-server service through its RESTful interface, that is, the actual IP and port where the service is deployed. The request URL is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (see the sketch after this list).

2. Restart refresh: the engine directory can be forcibly refreshed by restarting the service

### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

3. Check whether the engine refresh succeeded: if you encounter problems during the refresh and need to confirm whether it succeeded, check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time when the refresh was triggered.
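A hedged Python sketch of the refresh request described in step 1. The host and port are assumptions; use whatever address linkis-engineconn-plugin-server is actually deployed on.

```python
import requests

# Assumed IP and port of the deployed linkis-engineconn-plugin-server instance.
SERVER = "http://127.0.0.1:9103"

resp = requests.post(
    SERVER + "/api/rest_j/v1/rpc/receiveAndReply",
    json={"method": "/enginePlugin/engineConn/refreshAll"},
)
print(resp.status_code, resp.json())
# To verify, check that last_update_time in the
# linkis_engine_conn_plugin_bml_resources table matches the refresh time.
```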

    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/installation_hierarchical_structure/index.html b/docs/1.0.2/deployment/installation_hierarchical_structure/index.html index a4714273b92..21572843241 100644 --- a/docs/1.0.2/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.0.2/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Installation directory structure

The directory structure of Linkis1.0 is very different from that of the 0.X version. In 0.X, each microservice had its own independent root directory. The main advantage of that directory structure is that it is easy to distinguish microservices and manage each one individually, but there are some obvious problems:

1. There are too many microservice directories, and switching between them for management is inconvenient
    2. There is no unified startup script, which makes it more troublesome to start and stop microservices
    3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we have greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, reducing the jar packages that are repeatedly dependent, and reusing configuration files and microservice management scripts as much as possible. Mainly reflected in the following aspects:

1. The bin folder is no longer provided for each microservice; it is now shared by all microservices.

    The Bin folder is modified to the installation directory, which is mainly used to install Linkis1.0 and check the environment status. The new sbin directory provides one-click start and stop for Linkis, and provides independent start and stop for all microservices by changing parameters.

2. A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

The Conf folder contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, the special configuration of each microservice, which under normal circumstances users do not need to change.

3. The lib folder is no longer provided for each microservice; it is now shared by all microservices.

The Lib folder also contains two kinds of content: on the one hand, the common dependencies required by all microservices; on the other hand, the special dependencies required by each microservice.

4. The log directory is no longer provided for each microservice; it is now shared by all microservices.

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis1.0 is as follows.

├── bin ──installation directory
    │ ├── checkEnv.sh ── Environmental variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties(Optional)
    │ └── token.properties(Optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency package
    │ ├── linkis-computation-governance ──The lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process ID of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quick start and stop, restart a single microservice script
        ├── linkis-start-all.sh ── Start all microservice scripts with one click
        └── linkis-stop-all.sh ── Stop all microservice scripts with one click

    Configuration item modification

After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally, you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

├── linkis-cg-engineconnmanager ──engine management service
    ├── linkis-cg-engineplugin ──EngineConnPlugin management service
    ├── linkis-cg-entrance ──computing governance entrance service
    ├── linkis-cg-linkismanager ──computing governance management service
    ├── linkis-mg-eureka ──microservice registry service
    ├── linkis-mg-gateway ──Linkis gateway service
    ├── linkis-ps-bml ──material library service
    ├── linkis-ps-cs ──context service
    ├── linkis-ps-datasource ──data source service
    └── linkis-ps-publicservice ──public service

    Microservice abbreviation:

| Abbreviation | Full English Name | Full Chinese Name |
| --- | --- | --- |
| cg | Computation Governance | Computing Governance |
| mg | Microservice Governance | Microservice Governance |
| ps | Public Enhancement Service | Public Enhancement Service |

In the past, to start and stop a single microservice, you needed to enter each microservice's bin directory and execute the start/stop script there. With many microservices, starting and stopping was troublesome and required many extra directory-switching operations. Linkis1.0 places all scripts related to starting and stopping microservices in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

3.Start a single microservice (remove the linkis prefix from the service name, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/quick_deploy/index.html b/docs/1.0.2/deployment/quick_deploy/index.html index d60ed5e195c..7fa37e72005 100644 --- a/docs/1.0.2/deployment/quick_deploy/index.html +++ b/docs/1.0.2/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -23,7 +23,7 @@ # set the connection information of the database # including ip address, database's name, username and port # Mainly used to store user's customized variables, configuration parameters, UDFs, and samll functions, and to provide underlying storage of the JobHistory. MYSQL_HOST= MYSQL_PORT= MYSQL_DB= MYSQL_USER= MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

        sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

A user might run the install.sh script repeatedly and thereby clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether they need to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

3. Check whether the installation succeeded#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Linkis quick startup#

    (1). Start services

    Run the following commands on the installation directory to start all services.

      sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on the Eureka, here is the way to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is http://127.0.0.1:20303

As shown in the figure below, if all of the following microservices are registered on Eureka, it means that they've started successfully and are able to work.

    Linkis1.0_Eureka
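Besides the browser check above, registration can also be verified programmatically against Eureka's standard REST endpoint. A sketch, assuming the default /eureka/apps path and the default port 20303; the response layout follows the stock Netflix Eureka JSON rendering.

```python
import requests

EUREKA = "http://127.0.0.1:20303"  # ${EUREKA_INSTALL_IP}:${EUREKA_PORT}

resp = requests.get(EUREKA + "/eureka/apps",
                    headers={"Accept": "application/json"})
for app in resp.json()["applications"]["application"]:
    instances = app["instance"]
    if isinstance(instances, dict):  # a single instance may arrive as an object
        instances = [instances]
    print(app["name"], len(instances), "instance(s) registered")
```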

    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html index 91b3f493492..24e21f820e3 100644 --- a/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Source Code Directory Structure

    Linkis source code hierarchical directory structure description, if you want to learn more about Linkis modules, please check Linkis related architecture design

|-- assembly-combined-package //Compile the module of the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- config
    |        |-- src
    |-- linkis-commons //Core abstraction, which contains all common modules
    |        |-- linkis-common //Common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient //Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis //SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler //General scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance //computing governance service
    |        |-- linkis-client //Java SDK, users can directly access Linkis through Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance //General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements //Public enhancement services
    |        |-- linkis-bml //Material library
    |        |-- linkis-context-service //Unified context
    |        |-- linkis-datasource //Data source service
    |        |-- linkis-publicservice //Public Service
    |-- linkis-spring-cloud-services //Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway //Gateway
    |-- db //Database information
    |
    |-- web //Management console code of linkis
    - + \ No newline at end of file diff --git a/docs/1.0.2/deployment/web_install/index.html b/docs/1.0.2/deployment/web_install/index.html index 8032ca85ad8..b963f9d6ecb 100644 --- a/docs/1.0.2/deployment/web_install/index.html +++ b/docs/1.0.2/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -20,7 +20,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service: sudo systemctl restart nginx

    3. After execution, you can access it directly in the Google Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m;

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s;
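    After changing either setting, validate the configuration and reload nginx so the change takes effect (standard nginx commands):

        # Check the configuration syntax, then reload without dropping connections
        sudo nginx -t && sudo nginx -s reload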
    - + \ No newline at end of file diff --git a/docs/1.0.2/development/linkis_compile_and_package/index.html b/docs/1.0.2/development/linkis_compile_and_package/index.html index 62cfc54bf6a..ecd5d73fe4a 100644 --- a/docs/1.0.2/development/linkis_compile_and_package/index.html +++ b/docs/1.0.2/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
            <version>${hadoop.version}</version>
        </dependency>

    Modify hadoop-hdfs to:

        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs-client</artifactId>
            <version>${hadoop.version}</version>
        </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

        cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
        vim pom.xml

        <properties>
            <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
        </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine
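    As a minimal sketch of that compile step, assuming Maven is installed and you are still in the engine's module directory:

        # Rebuild this engine module, skipping tests
        mvn clean install -DskipTests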

    - + \ No newline at end of file diff --git a/docs/1.0.2/development/linkis_debug/index.html b/docs/1.0.2/development/linkis_debug/index.html index 548982ac379..45a4a0b9d98 100644 --- a/docs/1.0.2/development/linkis_debug/index.html +++ b/docs/1.0.2/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -44,7 +44,7 @@ [linkis-cg-engineplugin]nohup java -DserviceName=linkis-cg-engineplugin -Xmx512M -XX:+UseG1GC -Xloggc:/data/LinkisInstallDir/logs/linkis-cg-engineplugin-gc.log -cp /data/LinkisInstallDir/conf/:/data/LinkisInstallDir/lib/linkis-commons/public-module/*:/data/LinkisInstallDir/lib/linkis-computation-governance/linkis-cg-engineplugin/* org.apache.linkis.engineplugin.server.LinkisEngineConnPluginServer > /data/LinkisInstallDir/logs/linkis-cg-engineplugin.out 2>&1 &

    Remote debugging service steps#

    todo

    - + \ No newline at end of file diff --git a/docs/1.0.2/development/new_engine_conn/index.html b/docs/1.0.2/development/new_engine_conn/index.html index a8888a60c92..f73b27d8ce2 100644 --- a/docs/1.0.2/development/new_engine_conn/index.html +++ b/docs/1.0.2/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -17,7 +17,7 @@ b) createExecutor: creates a "HiveEngineConnExecutor" executor object based on the current engine connection information.

    The Hive engine is an ordinary Java process, so when implementing "EngineConnLaunchBuilder" it directly inherits "JavaProcessEngineConnLaunchBuilder". Parameters such as memory size, Java options and classpath can be adjusted through configuration; please refer to the "EnvConfiguration" class for details.

    The Hive engine uses "LoadInstanceResource" resources, so there is no need to implement "EngineResourceFactory"; the default "GenericEngineResourceFactory" is used directly, and the amount of resources is adjusted through configuration. Refer to the "EngineConnPluginConf" class for details.

    Implement "HiveEngineConnPlugin" and provide methods for creating the above implementation classes.

    - + \ No newline at end of file diff --git a/docs/1.0.2/development/web_build/index.html b/docs/1.0.2/development/web_build/index.html index 52fa3cddb96..0466090ebe3 100644 --- a/docs/1.0.2/development/web_build/index.html +++ b/docs/1.0.2/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the front end and back end of the project are developed separately, when running in a local browser the browser needs to be set to allow cross-domain requests in order to access the back-end interface. For specific settings, please refer to solving the Chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then replace the npm install step by executing the following command:

    cnpm install

    Note that when starting and packaging the project, you can still use the npm run build and npm run serve commands.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/hive/index.html b/docs/1.0.2/engine_usage/hive/index.html index 761524e5f16..eb716081837 100644 --- a/docs/1.0.2/engine_usage/hive/index.html +++ b/docs/1.0.2/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/jdbc/index.html b/docs/1.0.2/engine_usage/jdbc/index.html index d3ac0bd4593..c65402698a3 100644 --- a/docs/1.0.2/engine_usage/jdbc/index.html +++ b/docs/1.0.2/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4");           // required engineType label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc");               // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, tasks can be submitted through the Linkis-cli client. You only need to specify the corresponding EngineConn and CodeType label types. JDBC usage is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The JDBC engine works by loading the JDBC driver, submitting the SQL to the target database server for execution, and then fetching and returning the result set.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    The JDBC user settings are mainly the JDBC connection information; it is recommended that users encrypt and securely manage the password and other sensitive information.
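    Connection information is typically configured on the management console. As an illustration only, it could also be passed per submission; the sketch below assumes that your linkis-cli build supports a -confMap option (see the Linkis CLI Manual) and that the wds.linkis.jdbc.connect.url key name matches your engine version — both assumptions should be verified against your deployment:

        # Hypothetical example: supply the JDBC URL for a single task (verify flag and key name first)
        sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc \
           -confMap wds.linkis.jdbc.connect.url=jdbc:mysql://127.0.0.1:3306/test \
           -code "show tables" -submitUser hadoop -proxyUser hadoop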

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/overview/index.html b/docs/1.0.2/engine_usage/overview/index.html index a64747402ef..a591e36428f 100644 --- a/docs/1.0.2/engine_usage/overview/index.html +++ b/docs/1.0.2/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@ The engine is a component that provides users with data processing and analysis capabilities. The engines currently connected to Linkis include mainstream big data computing engines such as Spark, Hive and Presto, as well as engines for script-based data processing such as Python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis; users can conveniently use the engines supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Whether to support Scriptis | Whether to support workflow
    ------ | --------------------------- | ---------------------------
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support

    2. Document structure#

    For documentation on the engines that have already been integrated, please refer to the documents below.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/python/index.html b/docs/1.0.2/engine_usage/python/index.html index eb465befa87..1a7095690e9 100644 --- a/docs/1.0.2/engine_usage/python/index.html +++ b/docs/1.0.2/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the Python version and the modules that Python needs to load.

    Figure 4-1 User-defined configuration management console of python
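    The Python version can also be pinned per submission. A hedged sketch, assuming your linkis-cli build supports a -confMap option (verify against the Linkis CLI Manual) and using the pythonVersion parameter listed in the engine configuration tables later in this document:

        # Hypothetical example: run a script with a specific Python interpreter (verify -confMap support first)
        sh ./bin/linkis-cli -engineType python-python2 -codeType python \
           -confMap pythonVersion=/usr/local/bin/python3 \
           -code "print('hello')" -submitUser hadoop -proxyUser hadoop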

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/shell/index.html b/docs/1.0.2/engine_usage/shell/index.html index c2a782ae24c..b5f4c25fb19 100644 --- a/docs/1.0.2/engine_usage/shell/index.html +++ b/docs/1.0.2/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1");          // required engineType label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell");              // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, tasks can be submitted through the Linkis-cli client. You only need to specify the corresponding EngineConn and CodeType label types. Shell usage is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    Scriptis is the simplest to use: go directly into Scriptis, right-click a directory, create a new shell script, write the shell code and click Execute.

    The shell EngineConn executes code by starting a system process via Java's built-in ProcessBuilder, redirecting the process output back to the EngineConn and writing it to the log.

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    For the shell EngineConn, you can generally just set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.0.2/engine_usage/spark/index.html b/docs/1.0.2/engine_usage/spark/index.html index 23417f4d00a..658513d7dc4 100644 --- a/docs/1.0.2/engine_usage/spark/index.html +++ b/docs/1.0.2/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of Spark session executors and the executor memory. These parameters allow users to set their own Spark parameters more freely; other Spark parameters can also be modified, such as the Python version used by pyspark.

    Figure 4-1 Spark user-defined configuration management console
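    Executor resources can likewise be adjusted per submission. A hedged sketch, assuming your linkis-cli build supports a -confMap option (verify against the Linkis CLI Manual); the spark.executor.instances key mirrors the tuning example later in this document:

        # Hypothetical example: override the number of executors for one task (verify -confMap support first)
        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql \
           -confMap spark.executor.instances=3 \
           -code "show tables" -submitUser hadoop -proxyUser hadoop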

    - + \ No newline at end of file diff --git a/docs/1.0.2/introduction/index.html b/docs/1.0.2/introduction/index.html index 357e404a6e0..50421ba665e 100644 --- a/docs/1.0.2/introduction/index.html +++ b/docs/1.0.2/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -23,7 +23,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.0.2/tags/index.html b/docs/1.0.2/tags/index.html index e53c0c91a9f..ae26b804035 100644 --- a/docs/1.0.2/tags/index.html +++ b/docs/1.0.2/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html b/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html index c7d5cc2e819..d6c0346c7dd 100644 --- a/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file, linkis.properties, is provided in the conf directory so that common configuration parameters do not need to be configured in multiple microservices at the same time. This document lists the parameters of Linkis1.0 by module.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

            The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.

    1.1 Global configurations#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.encoding | utf-8 | Linkis default encoding format
    wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
    wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConn open remote debugging ports
    wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
    wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
    wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout
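    As an illustration, the sketch below appends a few of the global settings above to the public configuration file; values are examples only, and the relative path assumes you are in the Linkis installation directory:

        # Example global overrides in the public configuration file (illustrative values)
        cat >> conf/linkis.properties <<'EOF'
        wds.linkis.encoding=utf-8
        wds.linkis.test.mode=false
        wds.linkis.home=/appcom/Install/LinkisInstall
        EOF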

    1.2 LDAP configurations#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.ldap.proxy.url | None | LDAP URL address
    wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
    wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.hadoop.root.user | hadoop | HDFS super user
    wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
    wds.linkis.keytab.enable | false | Whether to enable Kerberos
    wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
    wds.linkis.keytab.host.enabled | false |
    wds.linkis.keytab.host | 127.0.0.1 |
    hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
    wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | Hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
    wds.linkis.ms.rpc.sync.timeout | 60000 | Linkis RPC Receiver's default processing timeout
    wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
    wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for a refresh (recommended default value)
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
    wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
    wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
    wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum number of buffers in the sender consumption queue

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version
    wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version
    wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version
    wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version
    wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version
    wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version
    wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
    wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a job
    wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
    wds.linkis.default.requestApplication.name | IDE | The default submission system when no submission system is specified
    wds.linkis.default.runType | sql | The default script type when no script type is specified
    wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not pushed to the client by default
    wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs that are not pushed to the client by default
    wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
    wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
    wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs that are not filtered by default when pushing Hive logs to the client
    wds.linkis.spark.special.log.include | com.webank.wedatasphere.linkis.engine.spark.utils.JobProgressUtil | Logs that are not filtered by default when pushing Spark logs to the client
    wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
    wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Shell syntax considered dangerous by default
    wds.linkis.shell.white.usage | cd,ls | Shell syntax whitelist
    wds.linkis.sql.default.limit | 5000 | Default maximum number of result-set rows returned for SQL

    2.2 EngineConn configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path for job result sets
    wds.linkis.engine.resultSet.cache.max | 0k | Result sets smaller than this size are returned to Entrance directly instead of being written to disk
    wds.linkis.engine.default.limit | 5000 |
    wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e., how long after Entrance acquires the lock without submitting code to EngineConn the lock is released
    wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs that are ignored by default when the engine pushes logs to the Entrance side
    wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the engine pushes logs to the Entrance side
    wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
    wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time

    2.3 EngineConnManager configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.ecm.memory.max | 80g | Maximum memory that the ECM can use to start EngineConn
    wds.linkis.ecm.cores.max | 50 | Maximum number of CPUs that the ECM can use to start EngineConn
    wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConn that can be started; it is generally recommended to set this the same as wds.linkis.ecm.cores.max
    wds.linkis.ecm.protected.memory | 4g | ECM protected memory, i.e., the memory the ECM uses to start EngineConn cannot exceed wds.linkis.ecm.memory.max minus wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPUs of the ECM; the meaning is the same as wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.engine.instances | 2 | Number of protected instances of the ECM
    wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return its pid

    2.4 LinkisManager configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
    wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time when LinkisManager reuses an existing EngineConn
    wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum number of polling attempts when LinkisManager reuses an existing EngineConn
    wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types for which the user is not part of the reuse rule when LinkisManager reuses an existing EngineConn
    wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine
    wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue
    wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue
    wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched per user in each engine's queue

    3. Engine configuration parameters#

    3.1 JDBC engine configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.jdbc.default.limit | 5000 | Default maximum number of result-set rows returned
    wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
    wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
    python.path | None | Specify an additional path for Python, which only accepts shared storage paths

    3.3 Spark engine configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for Scala and Python command interpreters
    PYSPARK_DRIVER_PYTHON | python | Python command path
    wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.bml.dws.version | v1 | Version number for Linkis Restful requests
    wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
    wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
    wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on HDFS

    4.2 Metadata configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
    hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
    hive.meta.url | None | URL of the HiveMetaStore database; if hive.config.dir is not configured, this value must be configured
    hive.meta.user | None | User of the HiveMetaStore database
    hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.jobhistory.admin | None | The default admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory
    wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
    wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
    wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set
    wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file
    wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file
    wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying file system (if the performance of your HDFS or Linux machine is low, it is recommended to increase this value appropriately)

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    --- | --- | ---
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests will be proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Proxy file refresh interval
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable token login mode; if enabled, access to Linkis in the form of tokens is allowed
    wds.linkis.gateway.conf.token.auth.config | token.properties | Token rule storage file
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Token file refresh interval
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests passed through by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether to enable RSA-encrypted transmission of the password when the user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file
    - + \ No newline at end of file diff --git a/docs/1.0.2/tuning_and_troubleshooting/overview/index.html b/docs/1.0.2/tuning_and_troubleshooting/overview/index.html index 6e16b2a35a1..78504d283e1 100644 --- a/docs/1.0.2/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.0.2/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ Operating system compatibility matters, and some system versions may have command incompatibilities. For example, the poor compatibility of yum on Ubuntu may cause yum-related errors during installation and deployment. It is also recommended to avoid deploying Linkis on Windows, as currently no script is fully compatible with .bat commands.

  • Missing configuration items: there are two configuration files that need to be modified in Linkis1.0, linkis-env.sh and db.sh

    The former contains the environment parameters that Linkis needs to load during execution, and the latter contains the database information for the tables Linkis itself needs to store. Under normal circumstances, if a corresponding configuration is missing, the error message will show an exception related to the key value. For example, when db.sh does not contain the relevant database configuration, an error such as unknown mysql server host '-P' will appear, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port occupation: since the default ports of Linkis microservices are mostly concentrated around 9000, before starting you need to check whether each microservice's port is occupied by another process. If a port is occupied, change the corresponding microservice port in the conf/linkis-env.sh file.

    2. Necessary configuration parameters are missing: some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the wds.linkis.engineconn.* configuration from conf/linkis.properties when it starts. If the user changes the Linkis path after installation and does not update this configuration accordingly, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: when deploying and installing, it is recommended that users follow the recommended system and application versions in the official documents as much as possible and install the necessary system plug-ins, such as expect, yum, etc. If an application version is not compatible, it may cause application-related errors. For example, SQL-statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation; you need to refer to the "Q&A Problem Summary" or the deployment documentation to make the corresponding settings.

  • Report error during microservice execution period

    The situation of error reporting during the execution of microservices is more complicated, and the situations encountered are also different depending on the environment, but the troubleshooting methods are basically the same. Starting from the corresponding microservice error catalog, we can roughly divide it into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure ("insufficient resources, request engine failure"): when this type of error occurs, it is not necessarily due to insufficient resources, because the front end only grabs logs after the Spring project has started; errors that occur before the engine starts cannot be fetched well. Three kinds of high-frequency problems were found during actual use by internal test users:

      a. The engine cannot be created because there is no engine directory permission: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice. You need to enter the file to view the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: since the engine directory has already been created, the log will be printed to the stdout file under the engine; the engine path can be found as described in c.

      c. Errors reported during engine execution: each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, you need to find the engine's log in the corresponding startup user's directory. The root path is the ENGINECONN_ROOT_PATH set in linkis-env.sh before installation. If you need to change the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties.

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved through the above process during installation and deployment, you can send the error information to our community groups. To help community partners and developers solve the problem efficiently, when asking a question please describe the problem phenomenon, attach the related log information, and list the places you have already checked. If you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with document review it requires some understanding of the source code structure. It is recommended that you check the detailed source-code structure of Linkis in the Linkis WIKI before remote debugging. Once you are reasonably familiar with the project's source-code structure, you can refer to How to Debug Linkis.

    - + \ No newline at end of file diff --git a/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html b/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html index 3076ea3be1c..e4249b1b6d6 100644 --- a/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When submitting a task to run on Yarn, Yarn provides a configurable interface. As a highly scalable framework, Linkis can likewise be configured to set task resources.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust this configuration to change the runtime environment of tasks submitted to Yarn. Due to limited space, more details (for example on Hive and Yarn configuration) require users to refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.0.2/upgrade/overview/index.html b/docs/1.0.2/upgrade/overview/index.html index 113317b0ba5..b3f36ea313c 100644 --- a/docs/1.0.2/upgrade/overview/index.html +++ b/docs/1.0.2/upgrade/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Overview

    The architecture of Linkis1.0 is very different from Linkis0.x, and there are some changes to the configuration of the deployment package and database tables. Before you install Linkis1.0, please read the following instructions carefully:

    1. If you are installing Linkis for the first time, or reinstalling Linkis, you do not need to pay attention to the Linkis Upgrade Guide.

    2. If you are upgrading from Linkis0.x to Linkis1.0, be sure to read the Linkis Upgrade from 0.x to 1.0 guide carefully.

    - + \ No newline at end of file diff --git a/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index ded0e1c7561..2caadeb5080 100644 --- a/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the public service-instanceLabel service depends on:
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on:
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis1.0 and verify whether the services have started normally and can provide services externally. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/console_manual/index.html b/docs/1.0.2/user_guide/console_manual/index.html index bf92cbbcf41..0a6f40082f8 100644 --- a/docs/1.0.2/user_guide/console_manual/index.html +++ b/docs/1.0.2/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.2

    Introduction to Computation Governance Console

    Linkis1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thus simplifying user development and management efforts.

    Structure of Computation Governance Console

    The Computation Governance Console is mainly composed of the following functional pages:

    • [Global History]
    • [Resource Management]
    • [Parameter Configuration]
    • [Global Variables]
    • [ECM Management] (Only visible to linkis computing management console administrators)
    • [Microservice Management] (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to Console administrators.

    Administrators of the Computation Governance Console can be configured via the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    Introduction to the functions and use of the Computation Governance Console

    Global history#

    The global history interface provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a failed task execution can be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.

    ./media/image4.png

    Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    ./media/image5.png

    Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

    The user can expand all the configuration information in the directory by clicking on the application type at the top and then select the engine type in the application, modify the configuration information and click "Save" to take effect.

    The edit catalog and new application type functions are only visible to the administrator. Click the edit button to delete an existing application and engine configuration (note: deleting an application directly will delete all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    ./media/image9.png

    ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view the status information of an ECM, modify its label and status information, and query all engine information under each ECM. It is only visible to administrators; how to configure administrators is described in the second chapter of this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    Microservice management#

    The microservice management interface shows all microservice information under Linkis and is only visible to administrators. Linkis's own microservices can be viewed by clicking on the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/how_to_use/index.html b/docs/1.0.2/user_guide/how_to_use/index.html index 9fb7d460f2c..776b9f04795 100644 --- a/docs/1.0.2/user_guide/how_to_use/index.html +++ b/docs/1.0.2/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/linkiscli_manual/index.html b/docs/1.0.2/user_guide/linkiscli_manual/index.html index 7737e75ecd0..384d006c5cd 100644 --- a/docs/1.0.2/user_guide/linkiscli_manual/index.html +++ b/docs/1.0.2/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Command-line parameters > keys in map-type command-line parameters > user configuration > default configuration

    Example:

    Configure engine startup parameters:

       wds.linkis.client.param.conf.spark.executor.instances=3
       wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    6. Output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will write the result sets to files, creating one file per result set. The output format is as follows:

        task-[taskId]-result-[idx].txt    

    For example:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
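    Putting it together, a run that writes its result sets to files might look like the following sketch (engine version and output path are examples only):

        # Submit a query and write each result set to a file under /tmp/linkis-results
        sh ./bin/linkis-cli -engineType hive-1.2.1 -codeType hql \
           -code "show tables" -submitUser hadoop -proxyUser hadoop \
           -outPath /tmp/linkis-results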
    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/overview/index.html b/docs/1.0.2/user_guide/overview/index.html index cd1189c511a..bf8657022b9 100644 --- a/docs/1.0.2/user_guide/overview/index.html +++ b/docs/1.0.2/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.2/user_guide/sdk_manual/index.html b/docs/1.0.2/user_guide/sdk_manual/index.html index a461927475e..7a6687520c5 100644 --- a/docs/1.0.2/user_guide/sdk_manual/index.html +++ b/docs/1.0.2/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -47,7 +47,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/jdbc_api/index.html b/docs/1.0.3/api/jdbc_api/index.html index f9dd78b981d..c0df4d116b6 100644 --- a/docs/1.0.3/api/jdbc_api/index.html +++ b/docs/1.0.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@ //3. Create statement and execute query Statement st= connection.createStatement(); ResultSet rs=st.executeQuery("show tables"); //4. Processing the returned results of the database (using the ResultSet class) while (rs.next()) { ResultSetMetaData metaData = rs.getMetaData(); for (int i = 1; i <= metaData.getColumnCount(); i++) { System.out.print(metaData.getColumnName(i) + ":" +metaData.getColumnTypeName(i)+": "+ rs.getObject(i) + " "); } System.out.println(); } // close resourse rs.close(); st.close(); connection.close(); }
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/linkis_task_operator/index.html b/docs/1.0.3/api/linkis_task_operator/index.html index 9aa4ca467bf..86a44e4dac0 100644 --- a/docs/1.0.3/api/linkis_task_operator/index.html +++ b/docs/1.0.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit for Execution#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {"variable": {}, "configuration": {}},    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    {    "executionContent": {"code": "show tables", "runType": "sql"},    "params": {"variable": {}, "configuration": {}},    "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.hql"},    "labels": {        "engineType": "spark-2.4.3",        "userCreator": "hadoop-IDE"    }}

    • Return example

    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    • execID is the unique execution ID generated for the task after it is submitted to Linkis. It is of type String and is only useful while the task is running, similar to the concept of a PID. The ExecID is composed as: (requestApplicationName length)(executeApplicationName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID representing the task submitted by the user. This ID is generated by database auto-increment and is of type Long
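    For reference, the submit interface above can be exercised from the command line. A hedged curl sketch, assuming the gateway listens at 127.0.0.1:9001 (a common default) and that a valid session cookie has been saved to cookies.txt via the Login API:

        # Submit a SQL task through the entrance submit interface (gateway address and cookie file are assumptions)
        curl -s -b cookies.txt -H "Content-Type: application/json" \
             -X POST http://127.0.0.1:9001/api/rest_j/v1/entrance/submit \
             -d '{"executionContent": {"code": "show tables", "runType": "sql"},
                  "params": {"variable": {}, "configuration": {}},
                  "labels": {"engineType": "spark-2.4.3", "userCreator": "hadoop-IDE"}}'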

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Return example

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine refers to the line number from which to start reading, and size refers to the number of log lines returned by this request

    • Return example, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress#

    • Interface /api/rest_j/v1/entrance/${execID}/progress

    • Submission method GET

    • Return example

    {  "method": "/api/rest_j/v1/entrance/{execID}/progress",  "status": 0,  "message": "Return progress information",  "data": {    "execID": "${execID}",    "progress": 0.2,    "progressInfo": [        {        "id": "job-1",        "succeedTasks": 2,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        },        {        "id": "job-2",        "succeedTasks": 5,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        }    ]  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/login_api/index.html b/docs/1.0.3/api/login_api/index.html index c6c06a61f5a..06cce6288bf 100644 --- a/docs/1.0.3/api/login_api/index.html +++ b/docs/1.0.3/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # LDAP service baseDN

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

        wds.linkis.test.mode=true    # Open test mode
        wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: returns status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returns an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Return example

    {
        "method": null,
        "status": 0,
        "message": "login successful(登录成功)!",
        "data": {
            "isAdmin": false,
            "userName": ""
        }
    }

    Among them:

    • isAdmin: Linkis only has admin users and non-admin users. The only privilege of admin users is to support viewing the historical tasks of all users in the Linkis management console.
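    A hedged curl sketch of this login interface, assuming the gateway at 127.0.0.1:9001 (a common default); the -c flag saves the session cookie so later authenticated requests can reuse it:

        # Log in and save the session cookie to cookies.txt (gateway address is an assumption)
        curl -s -c cookies.txt -H "Content-Type: application/json" \
             -X POST http://127.0.0.1:9001/api/rest_j/v1/user/login \
             -d '{"userName": "hadoop", "password": "your_password"}'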

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Return example

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Return example

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    - + \ No newline at end of file diff --git a/docs/1.0.3/api/overview/index.html b/docs/1.0.3/api/overview/index.html index 9097ddea701..679f53d0bf5 100644 --- a/docs/1.0.3/api/overview/index.html +++ b/docs/1.0.3/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it is also compatible with the 0.x interface. However, in order to prevent compatibility problems when using version 1.0, you need to read the following documents carefully:

    1. When using Linkis 1.0 for customized development, you need to use Linkis's authorization and authentication interface. Please read the Login API Document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis through JDBC, please read the Task Submit and Execute JDBC API Document.

    3. Linkis 1.0 provides a REST interface. If you need to develop upper-level applications on the basis of Linkis, please read the Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/add_an_engine_conn/index.html b/docs/1.0.3/architecture/add_an_engine_conn/index.html index 1570ab64124..f1f958b66f3 100644 --- a/docs/1.0.3/architecture/add_an_engine_conn/index.html +++ b/docs/1.0.3/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Add an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps. First, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager. Then LinkisManager initiates a request to EngineConnManager to start the EngineConn based on demands and label rules. Finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding an EngineConn

    1. LinkisManager receives the requests from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

    3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which can support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the Client's request for a new EngineConn, it first checks the parameters of the request to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used later in AM's creation process to find the ECM and record resource information, you need to ensure that the necessary Labels are present. At this stage, the request must carry a UserCreatorLabel (for example: hadoop-IDE) and an EngineTypeLabel (for example: spark-2.4.3).

    2. Select an EngineConnManager (ECM)#

    ECM selection uses the Labels passed by the client to pick a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs using the Labels passed by the client, and returns them ordered by label matching degree. After obtaining the registered ECM list, selection rules are applied to these ECMs. Rules such as availability checks, resource surplus, and machine load have been implemented at this stage. After the rules are applied, the ECM with the best label match, the most idle resources, and the lowest load is returned.
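    To make the selection rules concrete, here is an illustrative Java sketch of the filter-then-rank idea. The EcmNode record and its fields are hypothetical stand-ins, not Linkis classes; the real rule implementations are pluggable and richer:

        import java.util.Comparator;
        import java.util.List;

        public class EcmSelector {
            // Hypothetical ECM descriptor; Linkis' real ECM metadata classes differ.
            record EcmNode(String instance, boolean healthy,
                           long freeMemory, double load, int labelMatchDegree) {}

            // Filter out unavailable ECMs, then prefer the best label match, the
            // largest resource surplus and the lowest load, as described above.
            static EcmNode select(List<EcmNode> candidates) {
                return candidates.stream()
                        .filter(EcmNode::healthy)
                        .max(Comparator.comparingInt(EcmNode::labelMatchDegree)
                                .thenComparingLong(EcmNode::freeMemory)
                                .thenComparing(Comparator.comparingDouble(EcmNode::load).reversed()))
                        .orElseThrow(() -> new IllegalStateException("no usable ECM"));
            }
        }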

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM then asks how many resources the client's engine creation request will use by calling the EngineConnPluginServer service. Here the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine tag through the passed tags, and selects the EngineConnPlugin of the corresponding engine through the engine tag. It then uses the EngineConnPlugin's resource generator to calculate, from the engine startup parameters passed in by the client, the resources required to apply for a new EngineConn this time, and returns the result to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that must be implemented when connecting a new computing storage engine to Linkis. This interface mainly includes several capabilities that the EngineConn must provide during the startup process, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn engine connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: A microservice that loads all the EngineConnPlugins and externally provides the EngineConn resource generation and EngineConn startup command generation capabilities.
    • EngineConnResourceFactory: Calculates, from the parameters passed in, the total resources needed when the EngineConn starts (see the sketch after this list).
    • EngineConnLaunchBuilder: Generates, from the incoming parameters, the startup command of the EngineConn, which is provided to the ECM to start the engine.
    3. After AM obtains the engine resources, it then calls the RM service to apply for resources. The RM service uses the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. It first judges whether the resources of the client corresponding to the Label are sufficient, and then whether the resources of the ECM service are sufficient. If both are sufficient, the resource application is approved and the resources of the corresponding Label are deducted accordingly.
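    The following Java sketch illustrates the role an EngineConnResourceFactory plays. It is a simplified, hypothetical implementation: the Resource record and the Spark parameter names used as defaults are illustrative assumptions, not Linkis's actual resource model:

        import java.util.Map;

        public class SimpleEngineResourceFactory {
            // Simplified stand-in for Linkis' resource model, which carries more dimensions.
            record Resource(int cores, long memoryBytes, int instances) {}

            // Derive the total resource to request from the client's startup parameters.
            Resource createEngineResource(Map<String, String> startupParams) {
                int cores = Integer.parseInt(startupParams.getOrDefault("spark.executor.cores", "1"));
                long memory = parseMemory(startupParams.getOrDefault("spark.executor.memory", "1g"));
                int executors = Integer.parseInt(startupParams.getOrDefault("spark.executor.instances", "2"));
                // One EngineConn instance asking for driver + executor resources.
                return new Resource(cores * executors + 1, memory * (executors + 1), 1);
            }

            private long parseMemory(String value) {
                long scale = value.endsWith("g") ? 1L << 30 : 1L << 20; // "g" or "m" suffix
                return Long.parseLong(value.substring(0, value.length() - 1)) * scale;
            }
        }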

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, local environment variables required by the ECM, startup commands and other information required to start an EngineConn, so that the ECM can build a complete EngineConn startup script based on it.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain the EngineConnLaunchRequest it encapsulates.
    2. Parse the EngineConnLaunchRequest and encapsulate it into an EngineConn startup script.
    3. Execute the startup script to start the EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually need to be started from the label information of the EngineConnBuildRequest, get the EngineConnPlugin of that EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through the EngineConnPlugin's EngineConnLaunchBuilder.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local machine and checks whether the local environment variables required by the EngineConnLaunchRequest exist. After the verification passes, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, the ECM only supports Bash commands for Unix-like systems; that is, only Linux systems can execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the Client side.

    After the startup script is executed, the ECM monitors the script's execution status and execution log in real time. Once the exit status is non-zero, it immediately reports the EngineConn startup failure to LinkisManager and the entire process ends; otherwise, it keeps monitoring the log and status of the startup script until the script execution completes.
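    The switch-user-and-monitor pattern described above can be pictured with the following Java sketch. It is illustrative only, not ECM's actual code; it assumes a sudo rule that allows the service user to run commands as the requesting user:

        import java.io.File;

        public class StartupScriptRunner {
            // Run the generated EngineConn startup script as the requesting user
            // and watch its exit status, mirroring the behaviour described above.
            static int runAs(String requestUser, File script) throws Exception {
                Process process = new ProcessBuilder(
                        "sudo", "-u", requestUser, "bash", script.getAbsolutePath())
                        .inheritIO() // stream the script's log output in real time
                        .start();
                int exitCode = process.waitFor();
                if (exitCode != 0) {
                    // A non-zero exit status is reported to LinkisManager as a startup failure.
                    System.err.println("EngineConn startup failed, exit code " + exitCode);
                }
                return exitCode;
            }
        }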

    3. EngineConn initialization#

    After the ECM executes the EngineConn startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First use the command line parameters of the Java main method to encapsulate an EngineCreationContext that contains the relevant label information, startup information, and parameter information, and initialize the EngineConn through the EngineCreationContext to establish the connection between the EngineConn and the underlying engine. For example, SparkEngineConn will initialize a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario will initialize a series of Executors for submitting and executing SQL, PySpark, and Scala code, supporting the Client to submit and execute SQL, PySpark, Scala and other code to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly, and wait for the EngineConn to exit. When the underlying engine corresponding to the EngineConn is abnormal, or the maximum idle time is exceeded, or the Executor finishes execution, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of adding a new EngineConn is basically complete. Finally, let's make a summary:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing the BML materials, environment variables, local environment variables required by the ECM, startup commands and other information needed to start an EngineConn, then encapsulates the EngineConn startup script, and finally executes the script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/commons/message_scheduler/index.html b/docs/1.0.3/architecture/commons/message_scheduler/index.html index a417a551fc9..342faf12128 100644 --- a/docs/1.0.3/architecture/commons/message_scheduler/index.html +++ b/docs/1.0.3/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Message Scheduler Module

    1 Overview#

    Linkis-RPC enables communication between microservices. In order to simplify the use of RPC, Linkis provides the Message-Scheduler module, which uses the @Receiver annotation to analyze, identify, and invoke the corresponding methods. At the same time, it unifies the use of RPC and Restful interfaces, providing better scalability.

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parses the service object of the Service module and encapsulates the @Receiver-annotated methods into ServiceMethod objects.
    • ServiceRegistry: Registers the corresponding Service module and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: Parses the object of the Implicit module; methods annotated with @Implicit are encapsulated into ImplicitMethod objects.
    • ImplicitRegistry: Registers the corresponding Implicit module and stores the resolved ImplicitMethods in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: Implements the publishing and scheduling function: finds the ServiceMethod matching the RequestProtocol in the Registry and encapsulates it as a Job for submission and scheduling (see the sketch after this list).
    • Scheduler: The scheduling implementation, which uses Linkis-Scheduler to execute the Job and returns a MessageJob object.
    • TxManager: Completes transaction management: performs transaction management on Job execution and judges whether to commit or roll back after the Job execution ends.
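    To show the usage pattern, here is a minimal sketch of a Message-Scheduler style service method. The @Receiver annotation is re-declared locally as a placeholder because its package differs across Linkis versions; the request type and return value are hypothetical:

        import java.lang.annotation.ElementType;
        import java.lang.annotation.Retention;
        import java.lang.annotation.RetentionPolicy;
        import java.lang.annotation.Target;

        // Placeholder for Linkis' real @Receiver annotation.
        @Retention(RetentionPolicy.RUNTIME)
        @Target(ElementType.METHOD)
        @interface Receiver {}

        // A hypothetical request protocol object delivered over Linkis-RPC.
        class EngineStatusRequest {
            String engineInstance;
        }

        public class EngineStatusService {
            // ServiceParser encapsulates this @Receiver-annotated method into a
            // ServiceMethod and registers it, so the Publisher can dispatch a
            // matching RequestProtocol to it as a scheduled Job.
            @Receiver
            public String dealEngineStatus(EngineStatusRequest request) {
                return "Idle"; // the reply goes back to the caller via the RPC layer
            }
        }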
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/commons/rpc/index.html b/docs/1.0.3/architecture/commons/rpc/index.html index 366307c5443..c610e47c1ce 100644 --- a/docs/1.0.3/architecture/commons/rpc/index.html +++ b/docs/1.0.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: The service registry, used for service management and service discovery.
    • Sender: The service request interface; the sender uses a Sender to request services from the receiver (see the usage sketch after this list).
    • Receiver: The interface for receiving service requests; the receiver responds to services through this interface.
    • Interceptor: The Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: Used for request encoding and decoding.
    • Feign: A lightweight framework for HTTP request calls and a declarative WebService client, used for the underlying communication of Linkis-RPC.
    • Listener: The listening module, mainly used to listen to broadcast requests.
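    As a usage reference, the sketch below shows the common Sender calling pattern. It assumes the Linkis RPC module is on the classpath (the Sender class lives under org.apache.linkis.rpc in the Apache releases); the target service name and the request payload are placeholders:

        import org.apache.linkis.rpc.Sender;

        public class RpcCallExample {
            // A placeholder payload; real callers pass a request protocol object
            // that a @Receiver method on the target service knows how to handle.
            static class PingRequest {}

            public static void main(String[] args) {
                // Obtain a Sender for the target microservice by its registered
                // service name; ask(...) blocks until the Receiver replies, while
                // send(...) would deliver the message one-way without waiting.
                Sender sender = Sender.getSender("linkis-cg-linkismanager");
                Object response = sender.ask(new PingRequest());
                System.out.println(response);
            }
        }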
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html index 8c36ab53f10..029d7e4fb82 100644 --- a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    EngineConn architecture design

    EngineConn: The engine connector, the actual connection unit between Linkis and the underlying computing storage engines; it creates and holds the session information with the actual engine and provides it to the upper-layer microservice modules.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the capability to execute interactive computing tasks.

    Core class | Core function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core service | Core function
    EngineCreationContext | Contains the context information of the EngineConn during startup
    EngineConn | Contains the specific information of the EngineConn, such as its type and the connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class | Core function
    EngineConnServer | The startup class of the EngineConn microservice

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the Executor. The Executor is the executor of real computing scenarios, responsible for submitting user code to the EngineConn.

    Core class | Core function
    Executor | The actual computational logic execution unit; provides a top-level abstraction of the engine's various capabilities
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronous events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronous event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronous events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core class | Core function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible Executor#

    An Executor that can be accessed: you can interact with it through RPC requests to get basic metrics data such as its status, load, and concurrency.

    Core class | Core function
    LogCache | Provides the log caching function
    AccessibleExecutor | An Executor that can be interacted with through RPC requests
    NodeHealthyInfoManager | Manages the Executor's health information
    NodeHeartbeatMsgManager | Manages the Executor's heartbeat information
    NodeOverLoadInfoManager | Manages the Executor's load information
    Listener | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides the start-stop and status acquisition functions of the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions of the Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 1b96c647cef..5fb33e75b22 100644 --- a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core service | Core function
    EngineConnLaunchService | Contains the core methods for generating an EngineConn and starting the process
    BmlResourceLocalizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService | Reports its own healthy heartbeat to AM regularly
    ECMMetricsService | Reports its own metrics status to AM regularly
    EngineConnKillService | Provides related functions to stop the engine
    EngineConnListService | Provides caching and management functions for engines
    EngineConnCallBackService | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 9f4a7193c03..6687dd28b5c 100644 --- a/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core class | Core function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to request parameters, and has caching characteristics. The loading process consists of two parts: 1) plug-in resources such as the main program package and program dependency packages are loaded locally (not yet available); 2) plug-in resources are dynamically loaded from the local file system into the service process environment, for example, loaded into the JVM through a class loader.

    Core class | Core function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specifically for caching loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency. At the same time, the cache module periodically notifies the loader to update plug-in resources; if changes are found, the plug-in is automatically reloaded and the cache refreshed.

    Core class | Core function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Refreshes the cached engine connectors regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. It contains the implementations of the basic functions of the engine plug-in, such as the construction of the engine connector startup command, the construction of the engine resource factory, and the implementation of the core interfaces of the engine connector plug-in.

    Core class | Core function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, covering resource, command, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection houses the default engine connector plug-ins implemented against the plug-in interface defined by Linkis. It provides default engine connector implementations, such as jdbc, spark, python, and shell. Users can refer to these implemented cases and implement more engine connectors based on their own needs.

    Core class | Core function
    engineplugin-jdbc | The JDBC engine connector
    engineplugin-shell | The Shell engine connector
    engineplugin-spark | The Spark engine connector
    engineplugin-python | The Python engine connector
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/entrance/index.html b/docs/1.0.3/architecture/computation_governance_services/entrance/index.html index 3e56f36c176..5049b6ef029 100644 --- a/docs/1.0.3/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Entrance Architecture Design

    Entrance, the Linkis task submission portal, is the service that receives, schedules and forwards execution requests and manages the life cycle of computing tasks, and it can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of the Linkis 0.X Entrance.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer, the computing task submission portal service, is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core class | Core function
    EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task to make its content more complete; the supplementary information includes database information, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task.
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Responsible for job-related persistence operations, such as storing the result set path, job status changes, progress, etc. in the database
    ResultSetEngine | Responsible for storing the result set after the job runs, saved as a file to HDFS or a local storage directory
    LogManager | Responsible for storing job logs and managing log error codes
    Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html index 7507a47896b..f342e6909f0 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core class | Core function
    Action | Defines the attributes, methods, and parameters of a request
    Result | Defines the attributes, methods, and parameters of a returned result
    UJESClient | Responsible for request submission and execution, and for obtaining status, results, and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core class | Core function
    Common | Defines the instruction template parent class, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, executing tasks, and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index c0f64307075..78f0ce9a1d4 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 2a300e2eddf..39291c9196b 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html index 2bf95db8429..b535fa8e274 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index aa5f8e50bbe..0c9bc8f4f03 100644 --- a/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/computation_governance_services/overview/index.html b/docs/1.0.3/architecture/computation_governance_services/overview/index.html index 7da89e962eb..1bc21426f97 100644 --- a/docs/1.0.3/architecture/computation_governance_services/overview/index.html +++ b/docs/1.0.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html index 41faa7588ce..ce94e882d14 100644 --- a/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html b/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html index 4b14a281d14..72b6ccb1b67 100644 --- a/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It links almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the user submitting a computing task from the client and ending with the final result being returned, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recycling, giving ResourceManager full resource management capabilities across clusters and across computing resource types;
      2. AppManager: Coordinates and manages all EngineConnManagers and EngineConns, handing the life cycle of EngineConn application, reuse, creation, switching, and destruction to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, provides label support for the routing and management capabilities of EngineConn and EngineConnManager across IDCs and clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capability required to start an EngineConn and the EngineConn startup command generation capability.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or a client program) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis; a minimal client sketch follows this list):
    POST /api/rest_j/v1/entrance/submit
    {
      "executionContent": {"code": "show tables", "runType": "sql"},
      "params": {"variable": {}, "configuration": {}},  //not required
      "source": {"scriptPath": "file:///1.hql"},  //not required, only used to record code source
      "labels": {
        "engineType": "spark-2.4.3",  //Specify engine
        "userCreator": "username-IDE"  //Specify the submission user and submission system
      }
    }
    2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name entrance, and the Job is forwarded to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding according to the routing label instead of at random.
    3. After Entrance receives the Job request, it first verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, and then encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread continuously takes computing tasks from the consumption queue in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even for the same user, computing tasks submitted from different systems use completely separate consumption queues and consumer threads, fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out the computing task, it submits the computing task to Orchestrator, which officially enters the preparation phase.
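    Putting the submission request into code, here is a minimal Java sketch of a client submitting the Job above. The gateway address is a placeholder, and a prior successful login (as in the Login API document) is assumed so that the session cookie is present:

        import java.net.CookieManager;
        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class SubmitJobExample {
            public static void main(String[] args) throws Exception {
                String gateway = "http://127.0.0.1:9001"; // assumed gateway address
                HttpClient client = HttpClient.newBuilder()
                        .cookieHandler(new CookieManager()) // keeps the login session ticket
                        .build();

                // Step 0 (not shown): POST /api/rest_j/v1/user/login first, as in the Login API document.

                String job = """
                        {
                          "executionContent": {"code": "show tables", "runType": "sql"},
                          "labels": {"engineType": "spark-2.4.3", "userCreator": "hadoop-IDE"}
                        }""";

                HttpRequest submit = HttpRequest.newBuilder()
                        .uri(URI.create(gateway + "/api/rest_j/v1/entrance/submit"))
                        .header("Content-Type", "application/json")
                        .POST(HttpRequest.BodyPublishers.ofString(job))
                        .build();

                // The response's data field carries the execID used for progress/status polling.
                HttpResponse<String> response = client.send(submit, HttpResponse.BodyHandlers.ofString());
                System.out.println(response.body());
            }
        }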

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn with which to submit and execute the subsequent computing tasks. The other is Orchestrator orchestrating the computing tasks submitted by Entrance: converting a user's computing request into a physical execution tree and handing it over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How do we define a reusable EngineConn? It is one that matches all the label requirements of the computing task and whose health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet these conditions are then sorted and selected according to the rules, and the best one is finally locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Completes the conversion of the JobReq (job request) submitted by the user into Orchestrator's ASTJob. This step performs parameter checks and information supplementation on the computing task submitted by the user, such as variable replacement.
    • Parser: Completes the analysis of the ASTJob and splits it into an AST tree composed of ASTJob and ASTStage.
    • Validator: Completes the inspection and information supplementation of the ASTJob and ASTStage, such as code inspection and supplementing necessary Label information.
    • Planner: Converts the AST tree into a Logical tree. The Logical tree is composed of LogicalTasks, which contain all the execution logic of the entire computing task.
    • Optimizer: Converts the Logical tree into a Physical tree and optimizes it (a pipeline sketch follows this list).
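    For orientation, the five phases above can be sketched as a chain of transformations. The interfaces and node types below are deliberately simplified placeholders so the sketch compiles on its own; they are not Orchestrator's real API:

        public class OrchestratorPipeline {
            // Placeholder node types standing in for Orchestrator's real structures.
            static class JobReq {}
            static class AstJob {}
            static class AstTree {}
            static class LogicalTree {}
            static class PhysicalTree {}

            // Each phase consumes the previous phase's output.
            interface Converter { AstJob convert(JobReq jobReq); }
            interface Parser { AstTree parse(AstJob job); }
            interface Validator { AstTree validate(AstTree tree); }
            interface Planner { LogicalTree plan(AstTree tree); }
            interface Optimizer { PhysicalTree optimize(LogicalTree tree); }

            // JobReq -> AST -> (validated) AST -> Logical tree -> executable Physical tree.
            static PhysicalTree orchestrate(JobReq req, Converter c, Parser p,
                                            Validator v, Planner pl, Optimizer o) {
                return o.optimize(pl.plan(v.validate(p.parse(c.convert(req)))));
            }
        }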

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask needs to return a result. Once a result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails, the entire Physical tree would be marked as failed. At this point, StageExecTask (End) is needed to ensure that the Physical tree can both cancel the ExecTask that failed to execute and continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue to execute. This is the execution logic of the computing strategy represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, targeting users' different computing governance needs, whereas a SQL parsing engine is a parsing and orchestration engine oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves is the orchestration requirements that different computing strategies impose on computing tasks. For example, to be multi-active, Orchestrator will, based on the "multi-active" computing strategy, compile a Physical tree for a user's computing task so as to submit it to multiple clusters for execution. In constructing the entire Physical tree, various possible abnormal scenarios are fully considered and reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine is adapted to Linkis, all the programming languages it supports can be orchestrated, whereas a SQL parsing engine only cares about the analysis and execution of SQL and is only responsible for parsing a piece of SQL into an executable Physical tree and computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL. It supports splitting a user SQL that spans multiple computing engines (each must be an engine already integrated with Linkis) into multiple sub-SQLs, submitting them to the corresponding engines during the execution phase, and finally selecting a suitable engine for summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator submits the Physical tree to its Execution module and enters the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, which are the last two capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
    • Reheater: Once the execution of a node in the Physical tree is completed, a reheat is triggered. Reheating allows the Physical tree to be dynamically adjusted according to the real-time execution situation. For example, if a leaf node is detected to have failed and it supports retry (when the failure is caused by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn will pull the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the computing task is written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set path through RPC; Execution consumes the event and broadcasts the obtained result set path through the ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Entrance registered Listener with the Orchestrator consumes the state event, it updates the job state to JobHistory, and the entire task execution is completed.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls Entrance to obtain the status of the computing task (a polling sketch follows this list).
    2. Once the status flips to success, it requests the job information from JobHistory and gets all the result set paths.
    3. It then initiates a file content query request to PublicService through the result set paths and obtains the content of the result sets.
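    As a reference, the loop below is a minimal Java 11 sketch of the client-side polling step. The gateway address, execID, session cookie, and the /status endpoint shape follow the Entrance REST conventions shown earlier in this document, but treat them as assumptions to verify against your version:

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class PollStatusExample {
            public static void main(String[] args) throws Exception {
                String gateway = "http://127.0.0.1:9001"; // assumed gateway address
                String execID = "exec_id_from_submit";    // returned by the submit interface
                String cookie = "bdp-user-ticket-id=..."; // session ticket from login

                HttpClient client = HttpClient.newHttpClient();
                HttpRequest status = HttpRequest.newBuilder()
                        .uri(URI.create(gateway + "/api/rest_j/v1/entrance/" + execID + "/status"))
                        .header("Cookie", cookie)
                        .GET()
                        .build();

                // Poll until the job leaves its running states; a real client would also
                // fetch progress and logs between polls, then query JobHistory for result paths.
                while (true) {
                    String body = client.send(status, HttpResponse.BodyHandlers.ofString()).body();
                    System.out.println(body);
                    if (body.contains("Succeed") || body.contains("Failed") || body.contains("Cancelled")) {
                        break;
                    }
                    Thread.sleep(2000);
                }
            }
        }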

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html b/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html index 5f448d72909..48be394ea37 100644 --- a/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/microservice_governance_services/overview/index.html b/docs/1.0.3/architecture/microservice_governance_services/overview/index.html index 3445d7e2a74..d9039facba8 100644 --- a/docs/1.0.3/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.0.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/overview/index.html b/docs/1.0.3/architecture/overview/index.html index e6a89f35eb8..dbead7529ee 100644 --- a/docs/1.0.3/architecture/overview/index.html +++ b/docs/1.0.3/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X has provided.
    2. The microservice governance services are Spring Cloud Gateway, Eureka and Open Feign already provided by Linkis 0.X, and Linkis 1.0 will also provide support for Nacos
    3. Computing governance services are the core focus of Linkis 1.0, from submission, preparation to execution, overall three stages to comprehensively upgrade Linkis' ability to perform control over user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/bml/index.html b/docs/1.0.3/architecture/public_enhancement_services/bml/index.html index 5e097defede..0d4e52a0d82 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/bml/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  • Database Design#

    1. Resource information table (resource)

    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
    resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time |
    is_share | Whether to share | 0 means not to share, 1 means to share
    update_time | Last update time of the resource |
    is_expire | Whether the record resource expires |
    expire_time | Record resource expiration time |

    2. Resource version information table (resource_version)

    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Joint primary key
    version | The version of the resource file |
    start_byte | Start byte count of the resource file |
    end_byte | End byte count of the resource file |
    size | Resource file size |
    resource_location | Resource file placement location |
    start_time | Record upload start time |
    end_time | Record upload end time |
    updater | Record update user |

    3. Resource download history table (resource_download_history)

    Field | Function | Remarks
    resource_id | Record the resource_id of the downloaded resource |
    version | Record the version of the downloaded resource |
    downloader | Record the downloading user |
    start_time | Record download start time |
    end_time | Record download end time |
    status | Whether the download succeeded | 0 means success, 1 means failure
    err_msg | Log failure reason | null means success, otherwise logs the failure reason
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html index 218220ce10b..4e0b51e78f3 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 968c4280894..c8a095f9b71 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar and are not repeated here.

    KeyWord parsing logic#

    The specific entity bean of ContextValue needs to use the annotation @keywordMethod on any get method that can serve as a keyword. For example, the getTableName method of Table must be annotated with @keywordMethod (a minimal sketch follows the precautions below).

    When ContextKeyValueParser parses a ContextKeyValue, it scans all the methods of the passed-in object annotated with KeywordMethod, calls each get method, and takes the toString of the returned object, which is then parsed according to user-selectable rules (separator-based and regular-expression-based) and stored in the keyword collection.

    Precautions:

    1. The annotation is defined in the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
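
    A minimal sketch of this convention; the annotation definition shape shown here is assumed (the real annotation lives in the cs core module), while the annotation name and the Table.getTableName example come from the text above:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Assumed definition: runtime-retained, method-level annotation.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface KeywordMethod {}

    class Table {
        private final String tableName;

        Table(String tableName) {
            this.tableName = tableName;
        }

        // Annotated, parameterless get method: ContextKeyValueParser calls it
        // and parses the toString of the returned object into the keyword set.
        @KeywordMethod
        public String getTableName() {
            return tableName;
        }
    }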

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index c966fce4fd6..b97ac967eb7 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index b9c12972339..69ac4ad483a 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request and the Gateway determines that the main instance is invalid, it forwards the request to the standby instance for processing. After the HA module on the standby instance verifies that the HAID is valid, it loads the instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index a9642939858..f0811c307db 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. This is implemented with a listener pattern, together with a polling heartbeat mechanism, to maintain the metadata consistency of the context information.

    Client self-registration, CSKey registration, and CSKey update process#

    The main process is as follows:

    1. Registration operation: The clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the csserver through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the client and its corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKey values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, the client is removed.

    Listener UML class diagram#

    Interface: ListenerManager

    Externally: provides a ListenerBus for event delivery.

    Internally: provides a callback engine for specific event registration, access, update, and heartbeat processing logic
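
    A minimal sketch of this contract; all method signatures here are assumed (only the ListenerManager name appears in the text above):

    import java.util.List;
    import java.util.Map;

    interface ListenerManager {
        // Externally: deliver a CSKey update event onto the ListenerBus.
        void deliver(Object event);

        // Internally: register a client and the CSKeys it wants to monitor.
        void register(String clientId, List<String> csKeys);

        // Heartbeat: return the CSKey values updated since the client's last
        // poll; a client whose heartbeat times out is removed.
        Map<String, Object> heartbeat(String clientId);
    }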

    Listener callback engine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index a8bbd2cf320..489439ac5fd 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html index 85bd7969a02..e407773952b 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: the query entry; it accepts query conditions defined in Map form and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

    3. Execution module: Filter out the results that match the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Match. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the entries that meet the condition from a ContextKeyValue collection. Query conditions can be combined into more complex conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support the logical operations or, and, and not

      1. Unary operation UnaryContextSearchCondition: supports logical operations on a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition: supports logical operations on two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

    Each logical operation corresponds to an implementation class of the above subclasses.

    The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

    2. Support logical operations between Conditions that return new Conditions: And, Or, and Not (considering the form condition1.or(condition2), the top-level Condition interface must define the logical operation methods; see the sketch after this list)

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
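
    A minimal sketch of the combinable top-level interface described above; the simplified class names are assumed (the source uses UnaryContextSearchCondition/BinaryContextSearchCondition subclasses):

    interface Condition {
        default Condition and(Condition other) { return new AndCondition(this, other); }
        default Condition or(Condition other)  { return new OrCondition(this, other); }
        default Condition not()                { return new NotCondition(this); }
    }

    // Binary operations keep a left condition and a right condition.
    class AndCondition implements Condition {
        final Condition left, right;
        AndCondition(Condition left, Condition right) { this.left = left; this.right = right; }
    }

    class OrCondition implements Condition {
        final Condition left, right;
        OrCondition(Condition left, Condition right) { this.left = left; this.right = right; }
    }

    // Unary operation wraps a single condition.
    class NotCondition implements Condition {
        final Condition inner;
        NotCondition(Condition inner) { this.inner = inner; }
    }

    With this shape, condition1.or(condition2).not() builds a small condition tree that the Parser and Optimizer can then work on.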

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained
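
    A minimal, self-contained sketch of the three steps above; all method bodies are stubs and every name here is assumed:

    import java.util.Collections;
    import java.util.List;
    import java.util.Map;

    class ContextSearchSketch {
        interface Cond {}

        // Step 1: Parser converts the Map-form condition into a Condition object.
        static Cond parse(Map<String, Object> conditionMap) { return new Cond() {}; }

        // Step 2: Optimizer reorders the condition according to cost information.
        static Cond optimize(Cond condition) { return condition; }

        // Step 3: Execution runs the corresponding Ruler/Fetcher/Matcher logic.
        static List<Object> execute(Cond condition, List<Object> cache) {
            return Collections.emptyList();
        }

        static List<Object> search(Map<String, Object> conditionMap, List<Object> cache) {
            return execute(optimize(parse(conditionMap)), cache);
        }
    }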

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method optimizes the Condition based on its cost and converts it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
    2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutativity of the logical operations, the left and right children of a node in the Condition tree can be swapped, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
    3. For each tree, the cost is calculated from the leaf nodes up and aggregated at the root node, giving the final cost of the tree; the tree with the smallest cost is chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left child and 0.5 for the right child, with how to adjust it to be considered later. (The reason for assigning a weight is that in some cases the condition on the left alone can already determine whether the entire combination matches, so the condition on the right does not have to be executed in all cases, and its actual cost needs to be discounted by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

    Taking trees A and B as examples, calculate the costs of the two trees respectively, as shown in the figure below, where the numbers in each node are Cost|Weight, assuming that the costs of the five simple conditions A, B, C, D, and E are 10, 100, 50, 10, and 100 respectively. It can be concluded that the cost of tree B is less than that of tree A, making it the better plan.
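
    The cost rule can be made concrete with a small sketch. The tree shape below is arbitrary (the actual shapes of trees A and B appear only in the figures); only the leaf costs 10/100/50/10/100 and the 1/0.5 weights come from the text:

    class CostNode {
        final double cost;
        final CostNode left, right;

        CostNode(double leafCost) {               // leaf: cost from CostCalculator
            this.cost = leafCost;
            this.left = null;
            this.right = null;
        }

        CostNode(CostNode left, CostNode right) { // non-leaf: sum of child cost x weight
            this.left = left;
            this.right = right;
            this.cost = left.cost * 1.0 + right.cost * 0.5;
        }
    }

    public class TreeCostExample {
        public static void main(String[] args) {
            CostNode a = new CostNode(10), b = new CostNode(100), c = new CostNode(50),
                     d = new CostNode(10), e = new CostNode(100);
            // One possible execution order over the conditions A..E.
            CostNode root = new CostNode(new CostNode(a, b), new CostNode(c, new CostNode(d, e)));
            System.out.println(root.cost); // 100.0 for this particular ordering
        }
    }

    Swapping a node's children changes which weight each subtree receives and therefore the total, which is exactly why enumerating execution orders can find a cheaper tree.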

    3. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on an index: the cost is determined according to the distribution of the index values. For example, if the Array obtained from the Cache by condition A has length 100 and that of condition B has length 200, then the cost of condition A is less than that of B.

      2. Conditions that need to be traversed:

        1. According to the matching mode of the condition itself, an initial Cost is assigned: for example, 100 for Regex and 10 for Contains (the specific values will be adjusted as needed during implementation)

        2. Based on the efficiency of historical queries (throughput per unit time), the real-time Cost is obtained by continuously adjusting the initial Cost.

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html index e834b181430..34f566f913b 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/overview/index.html b/docs/1.0.3/architecture/public_enhancement_services/overview/index.html index 684074e2fc5..53ec03a33ed 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): Public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for other microservice modules.

    Introduction to the second-level module:

    BML material library#

    BML is the Linkis material management system. It is mainly used to store various file data of users, including user scripts, resource files, and third-party JAR packages, and can also store the class libraries that an engine needs at runtime.

    Core Class | Core Function
    UploadService | Provides resource upload service
    DownloadService | Provides resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provides high-availability capabilities for ContextService
    ListenerManager | Provides a message-bus capability
    ContextSearch | Provides the query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to Linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with query, progress, and log display functions for Linkis historical tasks, and provides administrators with a unified historical task view.

    Core Class | Core Function
    JobHistoryQueryService | Provides the historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides the user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html b/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html index bf13bd6312c..621e654c7ea 100644 --- a/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse a user's request into a set of labels.

    • Provides node label management, mainly the CRUD capability for node labels and label-based resource management, which records the maximum, minimum, and used resources of a label.

    - + \ No newline at end of file diff --git a/docs/1.0.3/contact/index.html b/docs/1.0.3/contact/index.html index b19a85f53d1..72b759b5cff 100644 --- a/docs/1.0.3/contact/index.html +++ b/docs/1.0.3/contact/index.html @@ -7,7 +7,7 @@ Contact Us | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/cluster_deployment/index.html b/docs/1.0.3/deployment/cluster_deployment/index.html index c318567b8a5..2d9d3673037 100644 --- a/docs/1.0.3/deployment/cluster_deployment/index.html +++ b/docs/1.0.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ Replicas will also display the replica nodes adjacent to the cluster.

    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html b/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html index 8ff77eb46ae..d34b5600475 100644 --- a/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example, place it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding parameters into the following tables:

    linkis_configuration_config_key: Insert the keys and default values of the engine's configuration parameters
    linkis_manager_label: Insert the engine label, such as hive-1.2.1
    linkis_configuration_category: Insert the catalog relationship of the engine
    linkis_configuration_config_value: Insert the configuration that the engine needs to display

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it.

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without restarting the server: send a request to the linkis-engineconn-plugin-server service through its RESTful interface, at the service's actual deployment IP and port. The request URL is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (a request sketch is shown after this list).

    2. Restart refresh: the engine directory can be forcibly refreshed by restarting

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh is successful: if you encounter problems during the refresh and need to confirm whether it succeeded, you can check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time when the refresh was triggered.
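
    As referenced in step 1, a sketch of that refresh request using Java 11's built-in HTTP client; ip:port stands for your actual linkis-cg-engineplugin deployment address, and any authentication headers your gateway requires are omitted:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RefreshEnginePlugins {
        public static void main(String[] args) throws Exception {
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://ip:port/api/rest_j/v1/rpc/receiveAndReply"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(
                            "{\"method\":\"/enginePlugin/engineConn/refreshAll\"}"))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body()); // inspect the reply for success
        }
    }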

    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/installation_hierarchical_structure/index.html b/docs/1.0.3/deployment/installation_hierarchical_structure/index.html index 5147b76bc54..5824f6f8535 100644 --- a/docs/1.0.3/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.0.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.X versions. In 0.X, each microservice had its own independent root directory. The main advantage of that layout is that microservices are easy to distinguish and manage individually, but it has some obvious problems:

    1. The microservice directories are too complicated, and switching between them for management is inconvenient
    2. There is no unified startup script, which makes starting and stopping microservices troublesome
    3. There is a large amount of duplicated service configuration, and the same setting often has to be modified in many places
    4. There are a large number of repeated lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0 we have greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, removing duplicated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

    1. The bin folder is no longer provided for each microservice; it is now shared by all microservices.

    The bin folder now serves as the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for Linkis, as well as independent start and stop of individual microservices by changing parameters.

    2. A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

    The conf folder contains two kinds of content: on the one hand, configuration shared by all microservices, which users can customize according to their own environment; on the other hand, configuration specific to each microservice, which under normal circumstances users do not need to change.

    3. The lib folder is no longer provided for each microservice; it is now shared by all microservices.

    The lib folder likewise contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

    4. The log directory is no longer provided for each microservice; it is now shared by all microservices.

    The log directory contains the log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environment variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (Optional)
    │ └── token.properties (Optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains the DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency packages
    │ ├── linkis-computation-governance ──lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ──SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process IDs of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start/stop script directory
        ├── ext ──Start/stop script directory of each microservice
        ├── linkis-daemon.sh ── Quickly start, stop, or restart a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ──engine management service
    ├── linkis-cg-engineplugin ──EngineConnPlugin management service
    ├── linkis-cg-entrance ──computing governance entrance service
    ├── linkis-cg-linkismanager ──computing governance management service
    ├── linkis-mg-eureka ──microservice registry service
    ├── linkis-mg-gateway ──Linkis gateway service
    ├── linkis-ps-bml ──material library service
    ├── linkis-ps-cs ──context service
    ├── linkis-ps-datasource ──data source service
    └── linkis-ps-publicservice ──public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, starting and stopping a single microservice required entering the bin directory of each microservice and executing its start/stop script. With many microservices this is troublesome and adds a lot of directory switching. Linkis 1.0 places all scripts related to starting and stopping microservices in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1. Start all microservices at once:

    sh linkis-start-all.sh

    2. Shut down all microservices at once:

    sh linkis-stop-all.sh

    3. Start a single microservice (the service name needs the linkis- prefix removed, such as mg-eureka):

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4. Shut down a single microservice:

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5. Restart a single microservice:

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6. View the status of a single microservice:

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/quick_deploy/index.html b/docs/1.0.3/deployment/quick_deploy/index.html index 9bb23f644ce..6761e7a9a3a 100644 --- a/docs/1.0.3/deployment/quick_deploy/index.html +++ b/docs/1.0.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store the user's customized variables, configuration parameters, UDFs, and small functions, and to provide the underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might repeatedly run the install.sh script and clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether they need to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation succeeded#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0 and does not meet the license policy of the Apache Software Foundation, starting from version 1.0.3 the official Apache deployment package does not include the mysql-connector-java-x.x.x.jar dependency by default; you need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/

    5. Linkis quick startup#

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check whether the startup succeeded

    You can check the startup status of the services on Eureka. Here is how:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is http://127.0.0.1:20303

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means that they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html index 4549b0c29d8..1c056928252 100644 --- a/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Source Code Directory Structure

    Linkis source code hierarchical directory structure description, if you want to learn more about Linkis modules, please check Linkis related architecture design

    |-- assembly-combined-package //Compile the module of the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons //Core abstraction, which contains all common modules
    |        |-- linkis-common //Common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient //Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis //SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler //General scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance //computing governance service
    |        |-- linkis-client //Java SDK, users can directly access Linkis through Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance //General low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements //Public enhancement services
    |        |-- linkis-bml //Material library
    |        |-- linkis-context-service //Unified context
    |        |-- linkis-datasource //Data source service
    |        |-- linkis-publicservice //Public Service
    |-- linkis-spring-cloud-services //Microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway //Gateway
    |-- db //Database information
    |-- license-doc //license details
    |        |-- license //The license of the background project
    |        |-- ui-license //License of the linkis management console
    |-- tool //Tool scripts
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web //Management console code of linkis
    |
    |-- scalastyle-config.xml //Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE //LICENSE of the project source code
    |-- LICENSE-binary //LICENSE of the binary package
    |-- LICENSE-binary-ui //LICENSE of the front-end compiled package
    |-- NOTICE //NOTICE of the project source code
    |-- NOTICE-binary //NOTICE of the binary package
    |-- NOTICE-binary-ui //NOTICE of the front-end binary package
    |-- licenses-binary //The detailed dependency license files of the binary package
    |-- licenses-binary-ui //The detailed dependency license files of the front-end compiled package
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.0.3/deployment/web_install/index.html b/docs/1.0.3/deployment/web_install/index.html index 5be5982352d..11aa5cfb529 100644 --- a/docs/1.0.3/deployment/web_install/index.html +++ b/docs/1.0.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service: sudo systemctl restart nginx

    3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
    - + \ No newline at end of file diff --git a/docs/1.0.3/development/linkis_compile_and_package/index.html b/docs/1.0.3/development/linkis_compile_and_package/index.html index 6605b3bef05..2b3796dfc5f 100644 --- a/docs/1.0.3/development/linkis_compile_and_package/index.html +++ b/docs/1.0.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/linkis_debug/index.html b/docs/1.0.3/development/linkis_debug/index.html index 2abb0d16b5e..f8c35f8490d 100644 --- a/docs/1.0.3/development/linkis_debug/index.html +++ b/docs/1.0.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -44,7 +44,7 @@ [linkis-cg-engineplugin]nohup java -DserviceName=linkis-cg-engineplugin -Xmx512M -XX:+UseG1GC -Xloggc:/data/LinkisInstallDir/logs/linkis-cg-engineplugin-gc.log -cp /data/LinkisInstallDir/conf/:/data/LinkisInstallDir /lib/linkis-commons/public-module/*:/data/LinkisInstallDir/lib/linkis-computation-governance/linkis-cg-engineplugin/* org.apache.linkis.engineplugin.server.LinkisEngineConnPluginServer 2>&1> /data /LinkisInstallDir/logs/linkis-cg-engineplugin.out &

    Remote debugging service steps#

    todo

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/new_engine_conn/index.html b/docs/1.0.3/development/new_engine_conn/index.html index b64a3b47e4a..23cbb8d0a32 100644 --- a/docs/1.0.3/development/new_engine_conn/index.html +++ b/docs/1.0.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -53,7 +53,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license and copyright of the SVG file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/springmvc-replaces-jersey/index.html b/docs/1.0.3/development/springmvc-replaces-jersey/index.html index 8a167cb97a5..218888ed1de 100644 --- a/docs/1.0.3/development/springmvc-replaces-jersey/index.html +++ b/docs/1.0.3/development/springmvc-replaces-jersey/index.html @@ -7,7 +7,7 @@ SpringMVC Replaces Jersey | Apache Linkis - + @@ -17,7 +17,7 @@

    For details, please refer to

    - + \ No newline at end of file diff --git a/docs/1.0.3/development/web_build/index.html b/docs/1.0.3/development/web_build/index.html index 91148142f88..83a2bd857bc 100644 --- a/docs/1.0.3/development/web_build/index.html +++ b/docs/1.0.3/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the front end and back end of the project are developed separately, when running in a local browser, the browser needs to allow cross-domain requests to access the back-end interface. For the specific setting, please refer to "Solve the Chrome cross-domain problem".

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/flink/index.html b/docs/1.0.3/engine_usage/flink/index.html index c790802e192..6026bb17dab 100644 --- a/docs/1.0.3/engine_usage/flink/index.html +++ b/docs/1.0.3/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis 1.0 selects engines through tags, so we need to insert tag data into our database. The way to insert it is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation operation, queue setting#

    The Flink engine of Linkis 1.0 is started by flink on yarn, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Prepare knowledge, two ways to use Flink engine#

    Linkis' Flink engine has two execution modes. One is the ComputationEngineConn mode, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of the Flink code; the other is the OnceEngineConn mode, which is mainly used to start a streaming application in the Streamis production center.

    Prepare knowledge, Connector plug-in of FlinkSQL#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, and hive. If you want to use these data sources in Flink code, you need to put the connector plug-in JAR packages into the lib directory of the Flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib directory of the Flink engine.

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    FlinkSQL writing example, taking binlog as an example

    CREATE TABLE mysql_binlog (
      id INT NOT NULL,
      name STRING,
      age INT
    ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'ip',
      'port' = 'port',
      'username' = 'username',
      'password' = 'password',
      'database-name' = 'dbname',
      'table-name' = 'tablename',
      'debezium.snapshot.locking.mode' = 'none' -- It is recommended to add this, otherwise the table will be locked
    );

    select * from mysql_binlog where id > 10;

    When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set already obtained; the front end then calls the open-result-set interface to display the result set.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn mode is used to formally start Flink streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. This mode can be called by other systems, such as Streamis. Using the Client is also very simple: first create a new maven project, or introduce the following dependency into your project.

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new Scala test file and click Execute to complete the analysis: it reads from a binlog data source and inserts into another MySQL database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and API version of Linkis, for example:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/

    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          // .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop").addJobContent("runType", "sql").addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/hive/index.html b/docs/1.0.3/engine_usage/hive/index.html index 4f7d35edebd..f9c6ad5caa6 100644 --- a/docs/1.0.3/engine_usage/hive/index.html +++ b/docs/1.0.3/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/jdbc/index.html b/docs/1.0.3/engine_usage/jdbc/index.html index a5160da29bf..7061330888e 100644 --- a/docs/1.0.3/engine_usage/jdbc/index.html +++ b/docs/1.0.3/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, tasks can be submitted through the Linkis-cli; you only need to specify the corresponding EngineConn and CodeType tag types. JDBC usage is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For detailed usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    Using Scriptis is the simplest way: go directly to Scriptis, right-click the directory, create a new JDBC script, write JDBC code, and click Execute.

    The execution principle of JDBC is to load the JDBC driver, submit the SQL to the SQL server for execution, obtain the result set, and return it.
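
    To make this principle concrete, here is a minimal self-contained sketch of that flow — load the configured driver, run the SQL, and collect the result set. It is an illustration of the mechanism using the standard java.sql API, not the engine's actual source code; the class name and method here are assumptions:

    import java.sql.*;
    import java.util.*;

    // Sketch of the JDBC engine's execution principle (not the actual engine code):
    // load the configured JDBC driver, submit the SQL, and collect the result set.
    public class JdbcPrincipleSketch {
        public static List<List<String>> run(String driver, String url,
                                             String user, String password,
                                             String sql) throws Exception {
            Class.forName(driver); // load the JDBC driver configured for the data source
            try (Connection conn = DriverManager.getConnection(url, user, password);
                 Statement st = conn.createStatement();
                 ResultSet rs = st.executeQuery(sql)) {
                ResultSetMetaData meta = rs.getMetaData();
                List<List<String>> rows = new ArrayList<>();
                while (rs.next()) { // turn each row into a list of column values
                    List<String> row = new ArrayList<>();
                    for (int i = 1; i <= meta.getColumnCount(); i++) {
                        row.add(String.valueOf(rs.getObject(i)));
                    }
                    rows.add(row);
                }
                return rows; // the engine would wrap this into a Linkis result set and return it
            }
        }
    }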

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information; it is recommended that users encrypt and manage the password and other sensitive information.
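
    A sketch of what these settings typically contain is shown below. The key names are assumptions based on common Linkis JDBC engine deployments and may differ across versions, so verify them against your own console's JDBC engine configuration page:

    # illustrative JDBC connection settings (key names are assumptions, values are placeholders)
    wds.linkis.jdbc.connect.url=jdbc:mysql://127.0.0.1:3306/test
    wds.linkis.jdbc.username=test_user
    wds.linkis.jdbc.password=******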

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/overview/index.html b/docs/1.0.3/engine_usage/overview/index.html index 19de8e1f5b6..2f25d2aed36 100644 --- a/docs/1.0.3/engine_usage/overview/index.html +++ b/docs/1.0.3/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. A variety of engines have already been connected to Linkis, including mainstream big data computing engines such as Spark, Hive, and Presto, as well as engines for script-based data processing such as Python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis; users can conveniently use the engines supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    | Engine | Whether to support Scriptis | Whether to support workflow |
    | --- | --- | --- |
    | Spark | Support | Support |
    | Hive | Support | Support |
    | Presto | Support | Support |
    | ElasticSearch | Support | Support |
    | Python | Support | Support |
    | Shell | Support | Support |
    | JDBC | Support | Support |
    | MySQL | Support | Support |
    | Flink | Support | Support |

    2. Document structure#

    For the engines that have already been connected, refer to the following documents.

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/python/index.html b/docs/1.0.3/engine_usage/python/index.html index 82f850738f3..c7271f1873c 100644 --- a/docs/1.0.3/engine_usage/python/index.html +++ b/docs/1.0.3/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the Python version and the modules that Python needs to load.
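
    For instance, using the Python engine parameters listed in the configuration document of this site (pythonVersion and python.path — the values below are illustrative assumptions, not defaults), switching to a Python 3 installation could look like:

    pythonVersion=/appcom/Install/anaconda3/bin/python3
    python.path=/appcom/modules/shared_python_modules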

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/shell/index.html b/docs/1.0.3/engine_usage/shell/index.html index 795adffb5cf..72adfb08812 100644 --- a/docs/1.0.3/engine_usage/shell/index.html +++ b/docs/1.0.3/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, tasks can be submitted through the Linkis-cli; you only need to specify the corresponding EngineConn and CodeType tag types. Shell usage is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For detailed usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    Using Scriptis is the simplest way: enter Scriptis directly, right-click the directory, create a new shell script, write shell code, and click Execute.

    The execution principle of the shell engine is that the shell EngineConn starts a system process through Java's built-in ProcessBuilder, redirects the output of the process to the EngineConn, and writes it to the log.
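
    As an illustration of this mechanism — a sketch of the idea, not the EngineConn's actual source code (the class name and log handling here are assumptions):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    // Sketch: start a system process with ProcessBuilder and redirect its output into a log.
    public class ShellPrincipleSketch {
        public static int run(String code) throws Exception {
            ProcessBuilder pb = new ProcessBuilder("sh", "-c", code);
            pb.redirectErrorStream(true); // merge stderr into stdout, like writing both to one log
            Process process = pb.start();
            try (BufferedReader reader =
                     new BufferedReader(new InputStreamReader(process.getInputStream()))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line); // the EngineConn would append this to the task log
                }
            }
            return process.waitFor(); // a non-zero exit code would mark the task as failed
        }
    }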

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    For the shell EngineConn, generally only the maximum memory of the EngineConn JVM needs to be set.

    - + \ No newline at end of file diff --git a/docs/1.0.3/engine_usage/spark/index.html b/docs/1.0.3/engine_usage/spark/index.html index 7c69f61ea25..9c6c1d11b22 100644 --- a/docs/1.0.3/engine_usage/spark/index.html +++ b/docs/1.0.3/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of Spark session executors and the executor memory. These parameters let users set their own Spark parameters more freely; other Spark parameters, such as the Python version used by pyspark, can also be modified.
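
    As a hedged illustration (these parameter names also appear in the tuning document on this site; the values are examples, and whether a value needs a unit suffix depends on the console), a user might override:

    spark.executor.instances=3
    spark.executor.cores=2
    spark.executor.memory=4g
    spark.driver.memory=2g
    wds.linkis.rm.yarnqueue=default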

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.0.3/introduction/index.html b/docs/1.0.3/introduction/index.html index 9fb4889a325..ae25b7bdafb 100644 --- a/docs/1.0.3/introduction/index.html +++ b/docs/1.0.3/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -23,7 +23,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.0.3/tags/index.html b/docs/1.0.3/tags/index.html index 6485e45b73e..c2589ee923f 100644 --- a/docs/1.0.3/tags/index.html +++ b/docs/1.0.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html b/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html index 9d39ae9fb93..3c22b642ba4 100644 --- a/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file, linkis.properties, is provided in the conf directory to avoid having to configure common parameters in multiple microservices at the same time. This document lists the parameters of Linkis1.0 by module.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

            General configuration can be set in the global linkis.properties: set once, it takes effect for every microservice (a minimal example follows the table below).

    1.1 Global configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.encoding | utf-8 | Linkis default encoding format |
    | wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format |
    | wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConns open remote debugging ports |
    | wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login |
    | wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically |
    | wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout |
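
    For example, a minimal linkis.properties touching only parameters from the table above might look like this (values are illustrative, not recommendations):

    wds.linkis.encoding=utf-8
    wds.linkis.test.mode=false
    wds.linkis.home=/appcom/Install/LinkisInstall
    wds.linkis.httpclient.default.connect.timeOut=50000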

    1.2 LDAP configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ldap.proxy.url | None | LDAP URL address |
    | wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address |
    | wds.linkis.ldap.proxy.userNameFormat | None | |

    1.3 Hadoop configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.hadoop.root.user | hadoop | HDFS super user |
    | wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path |
    | wds.linkis.keytab.enable | false | Whether to enable Kerberos |
    | wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true |
    | wds.linkis.keytab.host.enabled | false | |
    | wds.linkis.keytab.host | 127.0.0.1 | |
    | hadoop.config.dir | None | If not configured, it is read from the environment variable HADOOP_CONF_DIR |
    | wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | Hadoop additional configuration |

    1.4 Linkis RPC configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.rpc.broadcast.thread.num | 10 | Number of Linkis RPC broadcast threads (the default value is recommended) |
    | wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver |
    | wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (the default value is recommended) |
    | wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for a refresh (the default value is recommended) |
    | wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, increase this parameter appropriately) |
    | wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Maximum idle time of the Receiver Consumer |
    | wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, increase this parameter appropriately) |
    | wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Maximum number of Sender Consumer threads |
    | wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Maximum idle time of the Sender Consumer |
    | wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum number of buffers in the sender consumption queue |

    2. Calculate governance configuration parameters#

    2.1 Entrance configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.spark.engine.version | 2.4.3 | Default Spark version used when the user submits a script without specifying a version |
    | wds.linkis.hive.engine.version | 1.2.1 | Default Hive version used when the user submits a script without specifying a version |
    | wds.linkis.python.engine.version | python2 | Default Python version used when the user submits a script without specifying a version |
    | wds.linkis.jdbc.engine.version | 4 | Default JDBC version used when the user submits a script without specifying a version |
    | wds.linkis.shell.engine.version | 1 | Default shell version used when the user submits a script without specifying a version |
    | wds.linkis.appconn.engine.version | v1 | Default AppConn version used when the user submits a script without specifying a version |
    | wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance |
    | wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job |
    | wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default |
    | wds.linkis.default.requestApplication.name | IDE | Default submission system when no submission system is specified |
    | wds.linkis.default.runType | sql | Default script type when no script type is specified |
    | wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default |
    | wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs that are not output to the client by default |
    | wds.linkis.instance | 3 | Default number of concurrent jobs per user per engine |
    | wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn |
    | wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | Logs that are not filtered by default when pushing Hive logs to the client |
    | wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | Logs that are not filtered by default when pushing Spark logs to the client |
    | wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax |
    | wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Default dangerous shell syntax |
    | wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax |
    | wds.linkis.sql.default.limit | 5000 | Default maximum number of rows in a returned SQL result set |

    2.2 EngineConn configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path of job result sets |
    | wds.linkis.engine.resultSet.cache.max | 0k | Result sets smaller than this size are returned to Entrance directly by EngineConn without being written to disk |
    | wds.linkis.engine.default.limit | 5000 | |
    | wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code before the lock is released |
    | wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must always be pushed when the engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager |
    | wds.linkis.engineconn.max.free.time | 1h | Maximum idle time of an EngineConn |

    2.3 EngineConnManager configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ecm.memory.max | 80g | Maximum memory the ECM can use to start EngineConns |
    | wds.linkis.ecm.cores.max | 50 | Maximum number of CPU cores the ECM can use to start EngineConns |
    | wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; it is generally recommended to set this the same as wds.linkis.ecm.cores.max |
    | wds.linkis.ecm.protected.memory | 4g | ECM protected memory, i.e. the memory the ECM uses to start EngineConns cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.cores.max | 2 | Number of protected ECM CPU cores; the meaning is the same as wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.engine.instances | 2 | Number of protected ECM instances |
    | wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return the pid |

    2.4 LinkisManager configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn |
    | wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time when LinkisManager reuses an existing EngineConn |
    | wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum number of polls when LinkisManager reuses an existing EngineConn |
    | wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types whose user is not taken into account as a reuse rule when LinkisManager reuses an existing EngineConn |
    | wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine |
    | wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores used per user in each engine's Yarn queue |
    | wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory used per user in each engine's Yarn queue |
    | wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched per user in each engine's Yarn queue |

    3. Each engine configuration parameter#

    3.1 JDBC engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.jdbc.default.limit | 5000 | Default maximum number of rows in a returned result set |
    | wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine |
    | wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions |

    3.2 Python engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path |
    | python.path | None | Specifies an additional path for Python, which only accepts shared storage paths |

    3.3 Spark engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for the Scala and Python command interpreters |
    | PYSPARK_DRIVER_PYTHON | python | Python command path |
    | wds.linkis.server.spark-submit | spark-submit | spark-submit command path |

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.bml.dws.version | v1 | Version number of Linkis Restful requests |
    | wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests |
    | wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests |
    | wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on HDFS |

    4.2 Metadata configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default |
    | hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default |
    | hive.meta.url | None | URL of the HiveMetaStore database; if hive.config.dir is not configured, this value must be configured |
    | hive.meta.user | None | User of the HiveMetaStore database |
    | hive.meta.password | None | Password of the HiveMetaStore database |

    4.3 JobHistory configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.jobhistory.admin | None | Default admin account, used to specify which users can view everyone's execution history |

    4.4 FileSystem configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory |
    | wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory |
    | wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level suffix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix} |
    | wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set |
    | wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when a result set is downloaded as a CSV file |
    | wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when a result set is downloaded as an Excel file |
    | wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying filesystem (if the performance of your HDFS or Linux machine is low, it is recommended to increase this value appropriately) |

    4.5 UDF configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path |

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy-user mode; if enabled, the login user's requests are proxied to the proxy user for execution |
    | wds.linkis.gateway.conf.proxy.user.config | proxy.properties | File storing the proxy rules |
    | wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Refresh interval of the proxy file |
    | wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable token login mode; if enabled, access to Linkis in the form of tokens is allowed |
    | wds.linkis.gateway.conf.token.auth.config | token.properties | File storing the token rules |
    | wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Refresh interval of the token file |
    | wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification |
    | wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode |
    | wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page |
    | wds.linkis.admin.user | hadoop | Administrator user list |
    | wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in |
    | wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism |
    | wds.linkis.gateway.auth.file | auth.txt | File storing the IP whitelist |
    - + \ No newline at end of file diff --git a/docs/1.0.3/tuning_and_troubleshooting/overview/index.html b/docs/1.0.3/tuning_and_troubleshooting/overview/index.html index ddba747936d..92d5dddc7e6 100644 --- a/docs/1.0.3/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.0.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ The compatibility of the os version is the best, and some system versions may have command incompatibility. For example, the poor compatibility of yum in ubantu may cause yum-related errors in the installation and deployment. In addition, it is also recommended not to use windows as much as possible. Deploying linkis, currently no script is fully compatible with the .bat command.

  • Missing configuration item: There are two configuration files that need to be modified in linkis1.0 version, linkis-env.sh and db.sh

    The former contains the environment parameters that linkis needs to load during execution, and the latter contains the database information for the tables linkis itself needs to store. Under normal circumstances, if a required configuration is missing, the error message will show an exception related to the missing key. For example, if db.sh is not filled in with the database configuration, an error like "unknown mysql server host '-P'" will appear, which is caused by the missing host.
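
    For reference, db.sh mainly carries the connection information of the database Linkis itself uses; a filled-in sketch might look like the following. The variable names are assumptions based on common Linkis deployments — check the db.sh shipped with your own installation:

    # db.sh — database used by Linkis itself (illustrative values; variable names are assumptions)
    MYSQL_HOST=127.0.0.1
    MYSQL_PORT=3306
    MYSQL_DB=linkis
    MYSQL_USER=linkis
    MYSQL_PASSWORD=******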

  • Errors reported when starting a microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.
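
    For example, to inspect the Entrance service from the Linkis home directory (a generic sketch; paths follow the directory tree above and should be adjusted to your deployment):

    cd logs/linkis-computation-governance/linkis-cg-entrance
    tail -n 200 linkis.out     # System.out log; startup errors usually surface here first
    tail -n 200 linkis.log     # service log
    tail -n 200 linkis-gc.log  # GC log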

    Under normal circumstances, when an error occurs while starting a microservice, you can cd into the corresponding service directory under logs and check the related log to troubleshoot the problem. The most frequently occurring problems fall into three categories:

    1. Port occupation: since the default ports of Linkis microservices are mostly concentrated around 9000, check whether each microservice's port is occupied by another process before starting. If it is occupied, change the corresponding microservice port in the conf/linkis-env.sh file.

    2. Missing required configuration parameters: some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the wds.linkis.engineconn.*-related configuration in conf/linkis.properties at startup; if the user changed the Linkis path after installation and the configuration was not updated accordingly, an error is reported when the linkis-cg-engineplugin microservice starts.

    3. Incompatible system environment: when deploying and installing, it is recommended that users follow the recommended system and application versions in the official documents as much as possible and install the necessary system plug-ins, such as expect and yum. Incompatible application versions may cause application-related errors; for example, SQL statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation. Refer to the "Q&A Problem Summary" or the deployment documentation for the corresponding settings.

  • Errors reported while a microservice is running

    Errors during microservice execution are more complicated, and the situations encountered differ depending on the environment, but the troubleshooting method is basically the same. Starting from the corresponding microservice's error catalog, they can be roughly divided into three situations:

    1. Manually installed and deployed microservices report errors: the logs of these microservices are unified under the log/ directory; after locating the microservice, enter the corresponding directory to view them.

    2. Engine start failure ("insufficient resources, request engine failure"): when this error occurs, it is not necessarily due to insufficient resources, because the front end can only grab logs after the Spring project has started, so errors raised before the engine starts cannot be fetched well. Three kinds of high-frequency problems have been found in the actual use by internal test users:

      a. The engine cannot be created because of missing engine directory permissions: the log is printed to the linkis.out file under the cg-engineconnmanager microservice; you need to open that file to find the specific reason.

      b. A dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory: since the engine directory has already been created, the log is printed to the stdout file under the engine; the engine path can be found as described in item c.

      c. Errors reported during engine execution: each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, find the engine's log in the corresponding startup user directory. The root path is the ENGINECONN_ROOT_PATH filled in linkis-env.sh before installation; to change the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties.

    Ⅴ. Community user group consultation and communication#

    For problems encountered during installation and deployment that cannot be resolved by the process above, you can send the error information to our community group. To help community partners and developers solve it efficiently, when asking questions please describe the problem phenomenon and include the related log information and the places you have already checked. If you think it may be an environmental problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis github homepage.

    Ⅵ. locate the source code by remote debug#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with reviewing documents, it requires some understanding of the source code structure. It is recommended to check the detailed source-code structure of Linkis in the Linkis WIKI before remote debugging. After gaining a certain familiarity with the project's source code structure, you can refer to How to Debug Linkis.
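
    If you do move on to remote debugging, the standard JVM mechanism is JDWP: add an option like the one below to the target microservice's startup parameters (a generic Java flag, not a Linkis-specific switch; the port is an arbitrary choice), then attach your IDE's remote debugger to that port:

    -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005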

    - + \ No newline at end of file diff --git a/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html b/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html index 21f5eac4d8c..281b0c92e67 100644 --- a/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When a task is submitted to run on Yarn, Yarn provides a configurable interface, and Linkis, as a highly extensible framework, can also set this resource configuration through its own configuration.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust it to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details such as Hive and Yarn configuration, please refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.0.3/upgrade/overview/index.html b/docs/1.0.3/upgrade/overview/index.html index 98b1c90486e..08772f16d71 100644 --- a/docs/1.0.3/upgrade/overview/index.html +++ b/docs/1.0.3/upgrade/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Overview

    The architecture of Linkis1.0 is very different from Linkis0.x, and there are some changes to the configuration of the deployment package and database tables. Before you install Linkis1.0, please read the following instructions carefully:

    1. If you are installing Linkis for the first time, or reinstalling Linkis, you do not need to pay attention to the Linkis Upgrade Guide.

    2. If you are upgrading from Linkis0.x to Linkis1.0, be sure to read the Linkis Upgrade from 0.x to 1.0 guide carefully.

    - + \ No newline at end of file diff --git a/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 8b9322e844a..8367201a018 100644 --- a/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the publicservice-instanceLabel service depends on:
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on:
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/console_manual/index.html b/docs/1.0.3/user_guide/console_manual/index.html index 53fb1e17a14..38620ea653b 100644 --- a/docs/1.0.3/user_guide/console_manual/index.html +++ b/docs/1.0.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.0.3

    Console User Manual

    Linkis1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying user development and management.

    1. Structure of the Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrators of the Computation Governance Console can be configured with the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history page provides the user's own linkis task submission records. The execution status of each task is displayed here, and the reason a task failed can be queried by clicking the view button on the left side of the task.

    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    The user can expand all the configuration information in the directory by clicking on the application type at the top and then select the engine type in the application, modify the configuration information and click "Save" to take effect.

    Editing the catalog and creating new application types are only visible to the administrator. Click the edit button to delete an existing application or engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    5. Global variable#

    On the global variable page, users can customize variables for code writing; just click the edit button to add parameters.

    6. ECM management#

    The ECM management page is used by the administrator to manage the ECMs and all engines. On this page you can view an ECM's status information, modify its label and status information, and query all engine information under each ECM. It is visible only to the administrator; how to configure administrators is described in chapter 2 of this article.

    Click the edit button to edit the ECM's label information (only some labels can be edited) and modify its status.

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management page shows all microservice information under Linkis and is only visible to the administrator. Linkis's own microservices can be viewed by clicking the Eureka registration center; the microservices associated with Linkis are listed directly on this page.

    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/how_to_use/index.html b/docs/1.0.3/user_guide/how_to_use/index.html index 7c1bf67f837..966f3fe772d 100644 --- a/docs/1.0.3/user_guide/how_to_use/index.html +++ b/docs/1.0.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/linkiscli_manual/index.html b/docs/1.0.3/user_guide/linkiscli_manual/index.html index e9396356433..983308357fc 100644 --- a/docs/1.0.3/user_guide/linkiscli_manual/index.html +++ b/docs/1.0.3/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Command-line parameters > keys in map-type command parameters > user configuration > default configuration

    Example:

    Configure engine startup parameters:

    wds.linkis.client.param.conf.spark.executor.instances=3
    wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    Six, output the result set to a file#

    Use the -outPath parameter to specify an output directory; linkis-cli will then output the result sets to files, creating one file per result set. The output format is as follows:

    task-[taskId]-result-[idx].txt

    E.g.:

    task-906-result-1.txt
    task-906-result-2.txt
    task-906-result-3.txt
    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/overview/index.html b/docs/1.0.3/user_guide/overview/index.html index 54b5e79a4d2..f58af710d5c 100644 --- a/docs/1.0.3/user_guide/overview/index.html +++ b/docs/1.0.3/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.0.3/user_guide/sdk_manual/index.html b/docs/1.0.3/user_guide/sdk_manual/index.html index bfbbbe7bcab..911e9741c4d 100644 --- a/docs/1.0.3/user_guide/sdk_manual/index.html +++ b/docs/1.0.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/http/data-source-manager-api/index.html b/docs/1.1.0/api/http/data-source-manager-api/index.html index 096a5393d5c..f726e352e9b 100644 --- a/docs/1.1.0/api/http/data-source-manager-api/index.html +++ b/docs/1.1.0/api/http/data-source-manager-api/index.html @@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis - + @@ -20,7 +20,7 @@ Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | integer(int64) | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | --- | --- | --- | --- |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | integer(int64) | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | --- | --- | --- | --- |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | integer(int64) | |
    | version | version | path | true | integer(int64) | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | --- | --- | --- | --- |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSource | dataSource | body | true | DataSource | DataSource |
    | connectParams | | | false | object | |
    | createIdentify | | | false | string | |
    | createSystem | | | false | string | |
    | createTime | | | false | string(date-time) | |
    | createUser | | | false | string | |
    | dataSourceDesc | | | false | string | |
    | dataSourceEnv | | | false | DataSourceEnv | DataSourceEnv |
    | connectParams | | | false | object | |
    | createTime | | | false | string | |
    | createUser | | | false | string | |
    | dataSourceType | | | false | DataSourceType | DataSourceType |
    | classifier | | | false | string | |
    | description | | | false | string | |
    | icon | | | false | string | |
    | id | | | false | string | |
    | layers | | | false | integer | |
    | name | | | false | string | |
    | option | | | false | string | |
    | dataSourceTypeId | | | false | integer | |
    | envDesc | | | false | string | |
    | envName | | | false | string | |
    | id | | | false | integer | |
    | modifyTime | | | false | string | |
    | modifyUser | | | false | string | |
    | dataSourceEnvId | | | false | integer(int64) | |
    | dataSourceName | | | false | string | |
    | dataSourceType | | | false | DataSourceType | DataSourceType |
    | classifier | | | false | string | |
    | description | | | false | string | |
    | icon | | | false | string | |
    | id | | | false | string | |
    | layers | | | false | integer | |
    | name | | | false | string | |
    | option | | | false | string | |
    | dataSourceTypeId | | | false | integer(int64) | |
    | expire | | | false | boolean | |
    | id | | | false | integer(int64) | |
    | labels | | | false | string | |
    | modifyTime | | | false | string(date-time) | |
    | modifyUser | | | false | string | |
    | publishedVersionId | | | false | integer(int64) | |
    | versionId | | | false | integer(int64) | |
    | versions | | | false | array | DatasourceVersion |
    | comment | | | false | string | |
    | connectParams | | | false | object | |
    | createTime | | | false | string | |
    | createUser | | | false | string | |
    | datasourceId | | | false | integer | |
    | parameter | | | false | string | |
    | versionId | | | false | integer | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | --- | --- | --- | --- |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/http/metadatamanager-api/index.html b/docs/1.1.0/api/http/metadatamanager-api/index.html index 21a65fa34e9..519f1fcf408 100644 --- a/docs/1.1.0/api/http/metadatamanager-api/index.html +++ b/docs/1.1.0/api/http/metadatamanager-api/index.html @@ -7,7 +7,7 @@ MetadataCoreRestful | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | string | |
    | database | database | path | true | string | |
    | system | system | query | true | string | |
    | table | table | path | true | string | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | string | |
    | system | system | query | true | string | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | string | |
    | database | database | path | true | string | |
    | system | system | query | true | string | |
    | table | table | path | true | string | |
    | traverse | traverse | query | false | boolean | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | dataSourceId | dataSourceId | path | true | string | |
    | database | database | path | true | string | |
    | system | system | query | true | string | |
    | table | table | path | true | string | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/jdbc_api/index.html b/docs/1.1.0/api/jdbc_api/index.html index c7f28ad349c..ab359277f3c 100644 --- a/docs/1.1.0/api/jdbc_api/index.html +++ b/docs/1.1.0/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@ //3. Create statement and execute query Statement st= connection.createStatement(); ResultSet rs=st.executeQuery("show tables"); //4. Processing the returned results of the database (using the ResultSet class) while (rs.next()) { ResultSetMetaData metaData = rs.getMetaData(); for (int i = 1; i <= metaData.getColumnCount(); i++) { System.out.print(metaData.getColumnName(i) + ":" +metaData.getColumnTypeName(i)+": "+ rs.getObject(i) + " "); } System.out.println(); } // close resourse rs.close(); st.close(); connection.close(); }
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/linkis_task_operator/index.html b/docs/1.1.0/api/linkis_task_operator/index.html index d2b0af7f5f7..17fc2e2d36b 100644 --- a/docs/1.1.0/api/linkis_task_operator/index.html +++ b/docs/1.1.0/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Linkis Task submission and execution Rest API document

    • The Linkis Restful interface follows this standard return format:

    {
        "method": "",
        "status": 0,
        "message": "",
        "data": {}
    }

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit for Execution#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {"variable": {}, "configuration": {}},    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    {    "executionContent": {"code": "show tables", "runType": "sql"},    "params": {"variable": {}, "configuration": {}},    "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.hql"},    "labels": {        "engineType": "spark-2.4.3",        "userCreator": "hadoop-IDE"    }}

    • Return example

    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    • execID is the unique execution ID generated for a task after it is submitted to Linkis. It is of type String and is only meaningful while the task is running, similar to the concept of a PID. The ExecID is composed as (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID of the task submitted by the user. It is generated by database auto-increment and is of type Long.
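    As a concrete illustration, the submit interface can be called from Java with the JDK's built-in HttpClient. This is a minimal sketch only: the gateway address, port, and session cookie value are assumptions, and the cookie must first be obtained from the login interface described in the Login Api document.

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class SubmitTaskExample {
            public static void main(String[] args) throws Exception {
                // Assumptions: gateway address and an already-obtained session cookie.
                String gateway = "http://127.0.0.1:9001";
                String sessionCookie = "bdp-user-ticket-id=<your-ticket>";

                // Request body matching the submit example above.
                String body = "{"
                    + "\"executionContent\": {\"code\": \"show tables\", \"runType\": \"sql\"},"
                    + "\"params\": {\"variable\": {}, \"configuration\": {}},"
                    + "\"labels\": {\"engineType\": \"spark-2.4.3\", \"userCreator\": \"hadoop-IDE\"}"
                    + "}";

                HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(gateway + "/api/rest_j/v1/entrance/submit"))
                    .header("Content-Type", "application/json")
                    .header("Cookie", sessionCookie)
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();

                HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
                // On success the data field carries execID and taskID.
                System.out.println(response.body());
            }
        }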

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Return example

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine is the line number from which to start reading, and size is the number of log lines returned by this request

    • Return example, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress#

    • Interface /api/rest_j/v1/entrance/${execID}/progress

    • Submission method GET

    • Return example

    {  "method": "/api/rest_j/v1/entrance/{execID}/progress",  "status": 0,  "message": "Return progress information",  "data": {    "execID": "${execID}",    "progress": 0.2,    "progressInfo": [        {        "id": "job-1",        "succeedTasks": 2,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        },        {        "id": "job-2",        "succeedTasks": 5,        "failedTasks": 0,        "runningTasks": 5,        "totalTasks": 10        }    ]  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/login_api/index.html b/docs/1.1.0/api/login_api/index.html index b629ff8d36f..a5b971bd485 100644 --- a/docs/1.1.0/api/login_api/index.html +++ b/docs/1.1.0/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # Configuration of LDAP service

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

    wds.linkis.test.mode=true    # Open test mode
    wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Login Interface Summary#

    We provide the following login-related interfaces:

    • Login

    • Logout

    • Heartbeat

    4. Interface details#

    • The Linkis Restful interface returns responses in the following standard format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: the URI of the requested Restful API, mainly used in WebSocket mode.
    • status: the returned status information, where -1 means not logged in, 0 means success, 1 means error, 2 means verification failed, and 3 means no access to the interface.
    • data: the specific returned data.
    • message: the prompt message for the request. If status is not 0, message is an error message, and data may contain a stack field with the specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Return example

    {
        "method": null,
        "status": 0,
        "message": "login successful(登录成功)!",
        "data": {
            "isAdmin": false,
            "userName": ""
        }
    }

    Among them:

    • isAdmin: Linkis has only admin users and non-admin users. The only privilege of an admin user is viewing the historical tasks of all users in the Linkis management console.
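    For illustration, the login call can also be made from Java with the JDK HttpClient. This is a minimal sketch; the gateway address is an assumption, and the returned session ticket arrives as a Set-Cookie header that must be replayed as the Cookie header on subsequent requests.

        import java.net.URI;
        import java.net.http.HttpClient;
        import java.net.http.HttpRequest;
        import java.net.http.HttpResponse;

        public class LoginExample {
            public static void main(String[] args) throws Exception {
                String gateway = "http://127.0.0.1:9001";  // assumed gateway address
                String body = "{\"userName\": \"hadoop\", \"password\": \"***\"}";

                HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(gateway + "/api/rest_j/v1/user/login"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();

                HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
                // The session ticket comes back as a Set-Cookie header; reuse it
                // as the Cookie header on all following requests.
                response.headers().allValues("Set-Cookie").forEach(System.out::println);
                System.out.println(response.body());
            }
        }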

    2). Logout#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Return example

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heartbeat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Return example

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    - + \ No newline at end of file diff --git a/docs/1.1.0/api/overview/index.html b/docs/1.1.0/api/overview/index.html index bcbdf9baaf3..e3afbb9c576 100644 --- a/docs/1.1.0/api/overview/index.html +++ b/docs/1.1.0/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it remains compatible with the 0.x interfaces. However, to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When doing customized development with Linkis 1.0, you need to use Linkis's authorization and authentication interfaces. Please read the Login API Document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to access Linkis through JDBC, please read the Task Submit and Execute JDBC API Document.

    3. Linkis 1.0 provides a REST interface. If you need to develop upper-level applications on the basis of Linkis, please read the Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/add_an_engine_conn/index.html b/docs/1.1.0/architecture/add_an_engine_conn/index.html index 040ff88f120..8242b6d0f3f 100644 --- a/docs/1.1.0/architecture/add_an_engine_conn/index.html +++ b/docs/1.1.0/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Add an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks EngineConnManager to start an EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding an EngineConn

    1. LinkisManager receives requests from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which support multi-active deployment and are highly available and easy to expand.

    After the AM module receives the Client's request for a new EngineConn, it first checks the request parameters to determine their validity. Secondly, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used later in the AM creation process to find the ECM and to record resource information, you need to ensure that the necessary Labels are present. At this stage, the request must carry a UserCreatorLabel (for example: hadoop-IDE) and an EngineTypeLabel (for example: spark-2.4.3), as illustrated in the sketch below.
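    For example, a request carrying both required labels might declare them as follows. This is a sketch only; the label keys follow the userCreator and engineType conventions shown in the submit examples elsewhere in this document.

        import java.util.HashMap;
        import java.util.Map;

        public class LabelExample {
            public static void main(String[] args) {
                // The two labels that must be present on an EngineConn request:
                // UserCreatorLabel (user-client) and EngineTypeLabel (engine-version).
                Map<String, Object> labels = new HashMap<>();
                labels.put("userCreator", "hadoop-IDE");   // submitting user + submission system
                labels.put("engineType", "spark-2.4.3");   // engine type + version
                System.out.println(labels);
            }
        }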

    2. Select an EngineConnManager (ECM)#

    ECM selection uses the Labels passed by the client to choose a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs using the Labels passed by the client and returns them ordered by label matching degree. After the registered ECM list is obtained, selection rules are applied to these ECMs. At this stage, rules such as availability check, resource surplus, and machine load have been implemented. After the rules are applied, the ECM with the best label match, the most idle resources, and the lowest load is returned; a simplified sketch follows.
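    The selection can be pictured as a filter-then-score pipeline. The sketch below (Java 16+) uses a hypothetical Ecm record and a hypothetical load threshold; the real rule implementations live inside LinkisManager.

        import java.util.Comparator;
        import java.util.List;
        import java.util.stream.Collectors;

        public class EcmSelectionSketch {
            // Hypothetical view of a registered ECM: label match degree, free
            // resources and machine load as reported by its heartbeat.
            record Ecm(String instance, int labelMatchDegree, long freeMemoryMb, double load) {
                boolean available() { return load < 0.9; }  // availability-check rule (assumed threshold)
            }

            static Ecm select(List<Ecm> registered) {
                List<Ecm> candidates = registered.stream()
                    .filter(Ecm::available)  // rule 1: availability check
                    .collect(Collectors.toList());
                return candidates.stream()
                    .max(Comparator.comparingInt(Ecm::labelMatchDegree)        // best label match
                        .thenComparingLong(Ecm::freeMemoryMb)                  // most idle resources
                        .thenComparing(Comparator.comparingDouble(Ecm::load)
                            .reversed()))                                      // lowest load wins
                    .orElseThrow(() -> new IllegalStateException("no usable ECM"));
            }

            public static void main(String[] args) {
                Ecm chosen = select(List.of(
                    new Ecm("ecm-1:9102", 3, 8_192, 0.35),
                    new Ecm("ecm-2:9102", 3, 16_384, 0.20)));
                System.out.println(chosen.instance());  // ecm-2:9102
            }
        }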

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM calls the EngineConnPluginServer service to determine how many resources the client's engine creation request will use. Here the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine tag through the passed tags, and selects the EngineConnPlugin of the corresponding engine through the engine tag. Then it uses the EngineConnPlugin's resource generator to calculate, from the engine startup parameters passed in by the client, the resources required to start the new EngineConn this time, and returns them to LinkisManager.

      Glossary:

    • EngineConnPlugin: the interface that must be implemented when connecting a new computing storage engine to Linkis. This interface mainly includes the capabilities the EngineConn must provide during startup, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: a microservice that loads all the EngineConnPlugins and externally provides EngineConn resource generation and EngineConn startup command generation capabilities.
    • EngineConnResourceFactory: calculates, from the parameters passed in, the total resources needed when the EngineConn starts this time.
    • EngineConnLaunchBuilder: generates, from the incoming parameters, the startup command of the EngineConn, which is given to the ECM to start the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for resources. The RM service uses the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. It first judges whether the resources of the client corresponding to the Labels are sufficient, and then judges whether the resources of the ECM service are sufficient. If the resources are sufficient, the resource application is approved this time, and the resources of the corresponding Labels are added or subtracted.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM then determines, from the information reported by EngineConn, whether the EngineConn started successfully and became available. If so, the result is returned, and the process of adding an engine ends.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: contains the BML materials, environment variables, local environment variables required by the ECM, startup commands and other information required to start an EngineConn, so that the ECM can build a complete EngineConn startup script from it.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain the encapsulated EngineConnLaunchRequest.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Obtain the EngineConn type and corresponding version that actually need to be started from the label information of the EngineConnBuildRequest, get the EngineConnPlugin of that EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through the EngineConnLaunchBuilder of that EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local machine, and checks whether the local environment variables required by the EngineConnLaunchRequest exist. After the verification passes, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix-like systems; that is, only Linux systems are supported for executing the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the Client side.

    After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. Once the exit status is non-zero, it immediately reports an EngineConn startup failure to LinkisManager and the entire process ends; otherwise, it keeps monitoring the log and status of the startup script until the script execution completes. Conceptually, this step looks like the sketch below.
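    A minimal Java sketch of the execute-and-monitor step. The script path, the exact sudo invocation form, and treating any non-zero exit code as startup failure are assumptions for illustration.

        import java.io.BufferedReader;
        import java.io.InputStreamReader;

        public class LaunchScriptSketch {
            public static void main(String[] args) throws Exception {
                String requestUser = "hadoop";              // requesting user on the client side
                String script = "/tmp/engineConnExec.sh";   // hypothetical generated startup script

                // Switch to the requesting user so that the JVM user of the
                // new EngineConn is the submitting user.
                ProcessBuilder pb = new ProcessBuilder("sudo", "-u", requestUser, "bash", script);
                pb.redirectErrorStream(true);
                Process process = pb.start();

                // Monitor the startup log in real time.
                try (BufferedReader reader = new BufferedReader(
                        new InputStreamReader(process.getInputStream()))) {
                    String line;
                    while ((line = reader.readLine()) != null) {
                        System.out.println(line);
                    }
                }

                int exitCode = process.waitFor();
                if (exitCode != 0) {
                    // A non-zero status would be reported to LinkisManager as a failure.
                    System.err.println("EngineConn startup failed, exit code " + exitCode);
                }
            }
        }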

    3. EngineConn initialization#

    After the ECM has executed EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First use the command line parameters of the Java main method to encapsulate an EngineCreationContext that contains the relevant label information, startup information, and parameter information, and initialize the EngineConn through the EngineCreationContext to complete the establishment of the connection between the EngineConn and the underlying engine. For example, a SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, a SparkEngineConn in the interactive computing scenario initializes a series of Executors that support the Client submitting and executing SQL, PySpark, Scala and other code.
    3. Report the heartbeat to LinkisManager regularly, and wait for the EngineConn to exit. When the underlying engine corresponding to the EngineConn becomes abnormal, or the maximum idle time is exceeded, or the Executor finishes execution, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of adding a new EngineConn is basically complete. Finally, let's make a summary:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the labels, then confirms the resources required for the new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and, after the application is approved, requires the ECM to start a new EngineConn as requested.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing the BML materials, environment variables, local environment variables required by the ECM, startup commands and other information needed to start an EngineConn, then encapsulates the startup script of the EngineConn, and finally executes the startup script to start the EngineConn.
    • The EngineConn initializes the EngineConn of the specific engine, then initializes the corresponding Executor according to the actual usage scenario and provides service capabilities for subsequent users. Finally, it reports the heartbeat to LinkisManager regularly and waits for a normal exit or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/commons/message_scheduler/index.html b/docs/1.1.0/architecture/commons/message_scheduler/index.html index 2e691e00219..1a061ff48a5 100644 --- a/docs/1.1.0/architecture/commons/message_scheduler/index.html +++ b/docs/1.1.0/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Message Scheduler Module

    1 Overview#

            Linkis-RPC enables communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies, and invokes methods annotated with @Receiver, as sketched below. At the same time, it unifies the use of RPC and Restful interfaces, which gives better scalability.
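    For illustration, a service method exposed through this mechanism might look like the following Java sketch. The protocol classes and method body are hypothetical, and the import path of @Receiver is an assumption based on Linkis 1.x package naming; only the annotation itself is the piece the Message-Scheduler scans for.

        // The import path is an assumption; check the actual linkis-message-scheduler module.
        import org.apache.linkis.rpc.message.annotation.Receiver;

        public class EcmHealthService {
            // Hypothetical protocol object carried over RPC.
            public static class EcmHeartbeatRequest {
                public String instance;
            }

            public static class EcmHeartbeatResponse {
                public String status;
                public EcmHeartbeatResponse(String status) { this.status = status; }
            }

            // Message-Scheduler scans methods annotated with @Receiver, registers
            // them by parameter type, and dispatches matching RPC (or Restful)
            // requests to them as scheduled jobs.
            @Receiver
            public EcmHeartbeatResponse dealHeartbeat(EcmHeartbeatRequest request) {
                return new EcmHeartbeatResponse("healthy: " + request.instance);
            }
        }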

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: parses the object of the Service module, and encapsulates the methods annotated with @Receiver into ServiceMethod objects.
    • ServiceRegistry: registers the corresponding Service module, and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: parses the object of the Implicit module, and encapsulates the methods annotated with @Implicit into ImplicitMethod objects.
    • ImplicitRegistry: registers the corresponding Implicit module, and stores the resolved ImplicitMethods in a Map container.
    • Converter: scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches them with the related RequestProtocol.
    • Publisher: implements the publish-and-schedule function: finds the ServiceMethod matching the RequestProtocol in the Registry and encapsulates it as a Job for submission to the scheduler.
    • Scheduler: the scheduling implementation, which uses Linkis-Scheduler to execute the Job and return the MessageJob object.
    • TxManager: completes transaction management, performs transaction management on Job execution, and judges whether to commit or roll back after the Job execution ends.
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/commons/rpc/index.html b/docs/1.1.0/architecture/commons/rpc/index.html index 89e8f5ebffc..fb7bd2b7da7 100644 --- a/docs/1.1.0/architecture/commons/rpc/index.html +++ b/docs/1.1.0/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: the service registration center, used for service management and service discovery.
    • Sender: the service request interface; the sender uses a Sender to request services from the receiver (see the sketch after this list).
    • Receiver: the interface that receives service requests; the receiver responds to services through this interface.
    • Interceptor: the Sender passes the user's request to the interceptors, which intercept the request and perform additional processing on it: the broadcast interceptor broadcasts the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: a lightweight framework for HTTP request calls; a declarative web service client used for the underlying communication of Linkis-RPC.
    • Listener: the monitoring module, mainly used to monitor broadcast requests.
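    A sketch of point-to-point RPC through the Sender interface, called from Java. The service name and request payload are examples, and the package path is assumed from Linkis 1.x naming; consult the linkis-rpc module for the exact API.

        // The import path is an assumption based on Linkis 1.x package naming.
        import org.apache.linkis.rpc.Sender;

        public class RpcCallSketch {
            public static void main(String[] args) {
                // Obtain a Sender bound to the receiver microservice by name.
                Sender sender = Sender.getSender("linkis-cg-linkismanager");

                // ask(...) sends the request and blocks for the receiver's reply;
                // the receiver side resolves it through a matching @Receiver method.
                Object response = sender.ask("hello");
                System.out.println(response);
            }
        }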
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html index 716eec1f1e8..eba8df82ea5 100644 --- a/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    EngineConn architecture design

    EngineConn: the engine connector, the actual connection unit between Linkis and the underlying computing storage engines; it holds the session information with the actual engine and acts as the client that communicates with it.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the ability to execute interactive computing tasks.

    Core class | Core function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service | Core function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as its type and the specific connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class | Core function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the Executor. The Executor is the executor of the real computing scenario, responsible for executing the user code submitted to EngineConn.

    Core class | Core function
    Executor | It is the actual computational logic execution unit and provides a top-level abstraction of the various capabilities of the engine.
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronization events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronization event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronization events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor: the accessible Executor#

    An Executor that can be accessed. You can interact with it through RPC requests to get basic metrics such as its status, load, and concurrency.

    Core Class | Core Function
    LogCache | Provides the log cache function
    AccessibleExecutor | The Executor that can be accessed; you can interact with it through RPC requests
    NodeHealthyInfoManager | Manages the health information of the Executor
    NodeHeartbeatMsgManager | Manages the heartbeat information of the Executor
    NodeOverLoadInfoManager | Manages the load information of the Executor
    Listener | Provides events related to the Executor and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides the start-stop and status acquisition functions of the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions of the Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html index e5c6ea54c0b..f4fce9d7e1c 100644 --- a/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core service | Core function
    EngineConnLaunchService | Contains core methods for generating EngineConn and starting the process
    BmlResourceLocallizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService | Reports its own healthy heartbeat to AM regularly
    ECMMetricsService | Reports its own indicator status to AM regularly
    EngineConnKillSerivce | Provides related functions to stop the engine
    EngineConnListService | Provides engine caching and management functions
    EngineConnCallBackService | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 77f5baf55fa..e6680974198 100644 --- a/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class | Core Function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is a loader used to dynamically load engine connector plug-ins according to request parameters, with caching. The loading process mainly consists of two parts: 1) plug-in resources such as the main program package and program dependency packages are loaded to the local machine (not yet available); 2) plug-in resources are dynamically loaded from the local machine into the service process environment, for example, loaded into the JVM through a class loader, as sketched below.
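    The second part, bringing a plug-in jar into the JVM, can be sketched with a plain URLClassLoader. The jar path and the plugin class name below are hypothetical.

        import java.net.URL;
        import java.net.URLClassLoader;
        import java.nio.file.Paths;

        public class PluginLoadSketch {
            public static void main(String[] args) throws Exception {
                // Hypothetical local path of an engine connector plug-in jar.
                URL pluginJar = Paths.get("/appcom/linkis/plugins/spark/plugin.jar").toUri().toURL();

                // Each plug-in gets its own class loader, so it can be cached
                // together with its loader and replaced on refresh without
                // affecting other plug-ins.
                try (URLClassLoader loader = new URLClassLoader(
                        new URL[]{pluginJar}, PluginLoadSketch.class.getClassLoader())) {
                    Class<?> pluginClass = loader.loadClass(
                        "org.example.SparkEngineConnPlugin");  // hypothetical class name
                    Object plugin = pluginClass.getDeclaredConstructor().newInstance();
                    System.out.println("Loaded plugin: " + plugin.getClass().getName());
                }
            }
        }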

    Core Class | Core Function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, and supports reading, updating, and removing. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources. If changes are found, the plug-in is reloaded and the cache refreshed automatically.

    Core Class | Core Function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Refreshes the cached engine connectors regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core Class | Core Function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in implementation interface, including resources, commands, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library that has been implemented based on the plug-in interface defined by us. Provides the default engine connector implementation, such as jdbc, spark, python, shell, etc. Users can refer to the implemented cases based on their own needs to implement more engine connectors.

    Core Class | Core Function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell | Shell engine connector
    engineplugin-spark | spark engine connector
    engineplugin-python | python engine connector
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/entrance/index.html b/docs/1.1.0/architecture/computation_governance_services/entrance/index.html index a603953a322..7d244688896 100644 --- a/docs/1.1.0/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks, and it can return calculation results, logs, and progress to the caller. It is split from the native capabilities of Entrance in Linkis 0.x.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core Class | Core Function
    EntranceInterceptor | The Entrance interceptor is used to supplement the information of the incoming parameter task, making the content of the task more complete. The supplementary information includes: database information supplement, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | The Entrance parser is used to parse the request parameter Map into a Task, and it can also convert a Task into a schedulable Job, or convert a Job into a storable Task.
    EntranceExecutorManager | Entrance executor management creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Persistence management is responsible for job-related persistence operations, such as storing the result set path, job status changes, progress, etc., in the database.
    ResultSetEngine | The result set engine is responsible for storing the result set after the job runs; it is saved in the form of a file to HDFS or a local storage directory.
    LogManager | Log management is responsible for the storage of job logs and the management of log error codes.
    Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues.
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html index 8a57170b894..41da9afbe0c 100644 --- a/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core Class | Core Function
    Action | Defines the attributes of the request and the methods and parameters included
    Result | Defines the properties of the returned result and the methods and parameters included
    UJESClient | Responsible for request submission, execution, status, results and related parameter acquisition
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common | Defines the instruction template parent classes, the instruction analysis entity classes, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, executing tasks, and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 2d79b2836a7..6473017f1b7 100644 --- a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 955c737ff78..7694920f7a5 100644 --- a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html index 7e46db8eb07..e41141434a0 100644 --- a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 7c2075537be..5d099e63298 100644 --- a/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/computation_governance_services/overview/index.html b/docs/1.1.0/architecture/computation_governance_services/overview/index.html index a0b81ea38a3..602d1563725 100644 --- a/docs/1.1.0/architecture/computation_governance_services/overview/index.html +++ b/docs/1.1.0/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html index 82900ab5edf..024a79e2590 100644 --- a/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html b/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html index 427d3d47bff..6b673786bfd 100644 --- a/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It runs through almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the submission of the user's computing task from the client and ending with the return of the final result, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager:Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
      2. AppManager: Coordinate and manage all EngineConnManager and EngineConn, including the life cycle of EngineConn application, reuse, creation, switching, and destruction to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, it will provide label support for the routing and management capabilities of EngineConn and EngineConnManager across IDC and across clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name as entrance and forwards the Job to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding according to the routing label, instead of random forwarding.
    3. After Entrance receives the Job request, it first briefly verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it into the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread keeps taking computing tasks from the consumption queue in FIFO order. The current default grouping is Creator + User (that is, submission system + user), so even for the same user, computing tasks submitted by different systems use completely different consumption queues and consumer threads and are completely isolated from each other (see the sketch after this list; users can modify the grouping algorithm as needed).
    5. After the consumer thread takes out the computing task, it submits the computing task to Orchestrator, which officially enters the preparation phase.
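    The default grouping can be sketched as follows; this is a simplification, and the real scheduler builds group objects with their own queues and consumer threads.

        import java.util.Map;
        import java.util.Queue;
        import java.util.concurrent.ConcurrentHashMap;
        import java.util.concurrent.ConcurrentLinkedQueue;

        public class GroupedQueueSketch {
            // One FIFO consumption queue per Creator + User group.
            private final Map<String, Queue<String>> queues = new ConcurrentHashMap<>();

            // Default grouping: submission system + user, so tasks from different
            // systems are isolated even for the same user.
            static String groupKey(String creator, String user) {
                return creator + "_" + user;
            }

            void submit(String creator, String user, String job) {
                queues.computeIfAbsent(groupKey(creator, user),
                        k -> new ConcurrentLinkedQueue<>()).offer(job);
            }

            public static void main(String[] args) {
                GroupedQueueSketch scheduler = new GroupedQueueSketch();
                scheduler.submit("IDE", "hadoop", "show tables");
                scheduler.submit("Scheduler", "hadoop", "select 1");
                // Two distinct queues: IDE_hadoop and Scheduler_hadoop.
                System.out.println(scheduler.queues.keySet());
            }
        }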

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn to which the computing task will be submitted for execution. The other is Orchestrator orchestrating the computing task submitted by Entrance, converting the user's computing request into a physical execution tree and handing it over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How to define a reusable EngineConn? It refers to those that can match all the label requirements of the computing task, and the EngineConn's own health status is Healthy (the load is low and the actual status is Idle). Then, all the EngineConn that meets the conditions are sorted and selected according to the rules, and finally the best one is locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask result is required to return. Once that result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of individual nodes. Once a node is canceled or fails to execute, the entire Physical tree is marked as failed. At this time, StageExecTask(End) is needed to ensure that the Physical tree can not only cancel the ExecTask that failed to execute, but also continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue to execute. This is the computing strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to many SQL parsing engines (such as Spark, Hive's SQL parser). But in fact, the orchestration capability of Linkis Orchestrator is realized based on the computing governance field for the different computing governance needs of users. The SQL parsing engine is a parsing orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly wants to solve is the orchestration requirements caused by different computing tasks for computing strategies. For example, in order to be multi-active, Orchestrator will submit a calculation task for the user, based on the "multi-active" computing strategy requirements, compile a physical tree, so as to submit to multiple clusters to perform this calculation task. And in the process of constructing the entire Physical tree, various possible abnormal scenarios have been fully considered, and they have all been reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has adapted to Linkis, all the programming languages it supports can be orchestrated, while the SQL parsing engine only cares about the analysis and execution of SQL, and is only responsible for parsing a piece of SQL into one executable Physical tree, and finally calculate the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL. It supports splitting a user SQL that spans multiple computing engines (must be a computing engine that Linkis has docked) into multiple sub SQLs and submitting them to each corresponding engine during the execution phase. Finally, a suitable calculation engine is selected for summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator submits this Physical tree to its Execution module and enters the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
    • Reheater: once the execution of a node in the Physical tree is completed, it triggers a reheat. Reheating allows the Physical tree to be dynamically adjusted according to the real-time execution. For example: if a leaf node is detected to have failed and it supports retry (if the failure was caused by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added to the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through a thread pool and then immediately return an execution ID (this submit-and-poll pattern is sketched after this list).
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
5. EngineConn will pull the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
7. The result set generated by the computing task will be written to storage media such as HDFS on the EngineConn side; EngineConn returns only the result set path through RPC. Execution consumes the event and broadcasts the obtained result set path through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Entrance registered Listener with the Orchestrator consumes the state event, it updates the job state to JobHistory, and the entire task execution is completed.
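Steps 2 to 4 describe a classic submit-and-poll pattern. Below is a minimal sketch of that pattern in plain Java; the class and method names are hypothetical and this is not the Linkis source. The engine side submits the task to a thread pool and immediately hands back an execution ID, and the caller later pulls status by that ID.

    import java.util.Map;
    import java.util.UUID;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class EngineConnSketch {
        private final ExecutorService pool = Executors.newFixedThreadPool(4);
        private final Map<String, String> status = new ConcurrentHashMap<>();

        // Step 2: submit asynchronously and return an execution ID at once.
        String submit(Runnable computingTask) {
            String execId = UUID.randomUUID().toString();
            status.put(execId, "Running");
            pool.execute(() -> {
                try {
                    computingTask.run();
                    status.put(execId, "Succeed");
                } catch (Exception e) {
                    status.put(execId, "Failed");
                }
            });
            return execId;
        }

        // Steps 3-4: the caller pulls status (progress, logs, etc.) by execution ID.
        String getStatus(String execId) {
            return status.getOrDefault(execId, "Unknown");
        }
    }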

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
2. Once the status flips to success, it sends a request for job information to JobHistory and gets all the result set paths.
3. Initiate a file content query request to PublicService with each result set path to obtain the content of the result set (see the sketch after this list).
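The polling loop in steps 1 to 3 can be sketched as follows; the Gateway interface and its three methods here are hypothetical stand-ins for the corresponding REST calls (poll Entrance, query JobHistory, read via PublicService), not the actual Linkis client API:

    import java.util.List;

    class ResultPoller {
        interface Gateway {
            String getJobStatus(String jobId);          // step 1: poll Entrance for task status
            List<String> getResultPaths(String jobId);  // step 2: query JobHistory for result set paths
            String readResult(String path);             // step 3: read file content via PublicService
        }

        static void waitAndFetch(Gateway gw, String jobId) throws InterruptedException {
            String state;
            do {
                Thread.sleep(1000);                     // periodic polling
                state = gw.getJobStatus(jobId);
            } while (!"Succeed".equals(state) && !"Failed".equals(state));

            if ("Succeed".equals(state)) {
                for (String path : gw.getResultPaths(jobId)) {
                    System.out.println(gw.readResult(path));
                }
            }
        }
    }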

At this point, the entire process of job submission -> preparation -> execution has been completed.

    Version: 1.1.0

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X has provided.
2. The microservice governance services are Spring Cloud Gateway, Eureka and Open Feign, already provided by Linkis 0.X, and Linkis 1.0 will also provide support for Nacos.
3. Computing governance services are the core focus of Linkis 1.0; the three stages of submission, preparation and execution comprehensively upgrade Linkis' ability to control user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. Linkis 1.0 computing governance service related documents, please read Computation Governance Service.

3) The actual writing of material files into the material library is completed by the upload method in the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to List<MultipartFile> files are persisted to the material library's file storage system; the properties data of the material file is stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

MultipartFile p = files[0];
String resourceId = (String) properties.get("resourceId");
String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
fileName = resourceId;
String path = resourceHelper.generatePath(user, fileName, properties);
// generatePath currently supports Local and HDFS paths; the composition rules of paths are determined by LocalResourceHelper or HdfsResourceHelper
StringBuilder sb = new StringBuilder();
long size = resourceHelper.upload(path, user, inputStream, sb, true);
// The file size calculation and the writing of the file byte stream are implemented by the upload method in LocalResourceHelper or HdfsResourceHelper
Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
// Insert a record into the resource table linkis_ps_bml_resources
long id = resourceDao.uploadResource(resource);
// Add a new record to the resource version table linkis_ps_bml_resources_version; the version number at this time is Constant.FIRST_VERSION
// In addition to the metadata of this version, the most important thing is to record the storage location of this version's file, including the file path, start location and end location
String clientIp = (String) properties.get("clientIp");
ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
versionDao.insertNewVersion(resourceVersion);

After the above process is successfully executed, the material data is truly written, the UploadResult is returned to the client, and the status of this ResourceTask is marked as completed; if the upload fails, the status is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

If the table linkis_cg_engine_conn_plugin_bml_resources matches the local material data, you need to use the data in EngineConnLocalizeResource to construct an EngineConnBmlResource object and update the metadata such as the version number, file size and modification time of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before updating, you need to complete the update-and-upload operation of the material file, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

Inside the uploadToBml(localizeResource, resourceId) method, a bmlClient is constructed to request the material resource update interface, which is:

private val bmlClient = BmlClientFactory.createBmlClient()
bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    Complete the validity detection of resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception will be thrown to the client, and the material update operation at the interface level will fail.

Therefore, the correspondence of the resource data between the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources needs to be complete; otherwise, an error that the material file cannot be updated will occur.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method is to obtain the maximum version number of the resourceId in the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material will also fail to update, so the integrity of the corresponding relationship of the data here also needs to be strictly guaranteed.

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

ResourceTask resourceTask = null;
synchronized (resourceId.intern()) {
    resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
}

    Inside the createUpdateTask method, the main functions implemented are:

// Generate a new version for the material resource
String lastVersion = getResourceLastVersion(resourceId);
String newVersion = generateNewVersion(lastVersion);
// Then the construction of ResourceTask, and state maintenance
ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
// The logic of material update upload is completed by the versionService.updateVersion method
versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
InputStream inputStream = file.getInputStream();
// Get the path of the resource
String newVersion = params.get("newVersion").toString();
String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
// getResourcePath takes one record from the original path, and newVersion is then spliced on with "_"
// select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
// Upload resources to HDFS or local storage
StringBuilder stringBuilder = new StringBuilder();
long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
// Finally insert a new resource version record into the linkis_ps_bml_resources_version table
ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
versionDao.insertNewVersion(resourceVersion);
• After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

Database Design#

1. Resource information table (resource)

Field name        | Function                                              | Remarks
resource_id       | A string that uniquely identifies a resource globally | UUID can be used for identification
resource_location | The location where the resource is stored             | For example, hdfs:///tmp/bdp/${USERNAME}/
owner             | The owner of the resource                             | e.g. zhangsan
create_time       | Record creation time                                  |
is_share          | Whether to share                                      | 0 means not to share, 1 means to share
update_time       | Last update time of the resource                      |
is_expire         | Whether the resource expires                          |
expire_time       | Record resource expiration time                       |

2. Resource version information table (resource_version)

Field name        | Function                         | Remarks
resource_id       | Uniquely identifies the resource | Joint primary key
version           | The version of the resource file |
start_byte        | Start byte of the resource file  |
end_byte          | End byte of the resource file    |
size              | Resource file size               |
resource_location | Resource file placement location |
start_time        | Record upload start time         |
end_time          | Record upload end time           |
updater           | Record update user               |

3. Resource download history table (resource_download_history)

Field       | Function                                          | Remarks
resource_id | Record the resource_id of the downloaded resource |
version     | Record the version of the downloaded resource     |
downloader  | Record the downloading user                       |
start_time  | Record download time                              |
end_time    | Record end time                                   |
status      | Whether the record is successful                  | 0 means success, 1 means failure
err_msg     | Log failure reason                                | null means success, otherwise logs the failure reason

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

The specific entity bean of ContextValue needs to use the annotation @keywordMethod on the corresponding get method that can be used as the keyword. For example, the getTableName method of Table must be annotated with @keywordMethod.

When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are modified by the KeywordMethod annotation, calls each get method, and takes the toString of the returned object, which is parsed through user-selectable rules and stored in the keyword collection. The parsing rules include separators and regular expressions.
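To make the scan concrete, here is a minimal, self-contained sketch of the idea; the annotation, bean and parser below are simplified hypothetical stand-ins, not the actual cs module classes:

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;
    import java.lang.reflect.Method;
    import java.util.HashSet;
    import java.util.Set;

    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface KeywordMethod {}

    class Table {
        private final String tableName;
        Table(String tableName) { this.tableName = tableName; }

        @KeywordMethod  // marks getTableName as a keyword source
        public String getTableName() { return tableName; }
    }

    class KeywordParserSketch {
        // Scan the bean for annotated, parameterless get methods and collect
        // the toString of each returned object as a keyword.
        static Set<String> parse(Object bean) throws Exception {
            Set<String> keywords = new HashSet<>();
            for (Method m : bean.getClass().getMethods()) {
                if (m.isAnnotationPresent(KeywordMethod.class) && m.getParameterCount() == 0) {
                    Object value = m.invoke(bean);
                    if (value != null) keywords.add(value.toString());
                }
            }
            return keywords;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(parse(new Table("dws_user_log")));  // [dws_user_log]
        }
    }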

Precautions:

1. The annotation is defined in the core module of cs

2. The annotated get method cannot take parameters

3. The toString method of the object returned by the get method must return the keyword


The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, Gateway determines that the main Instance is invalid, and then forwards the request to the standby Instance for processing. After the instance on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.
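As a toy illustration of the HAID idea, the sketch below packs the main instance, the backup instance and the CSID into one opaque string and parses it back by string cutting; the encoding, instance addresses and CSID here are made up, and the real HAID format is defined by the CS HA module:

    import java.util.Base64;

    class HAIDSketch {
        // Pack main instance, backup instance and CSID into one opaque HAID string.
        static String generate(String mainInstance, String backupInstance, String csid) {
            String raw = mainInstance + ";" + backupInstance + ";" + csid;
            return Base64.getUrlEncoder().withoutPadding().encodeToString(raw.getBytes());
        }

        // Cut the HAID back apart, as the Gateway must do when routing a request.
        static String[] parse(String haid) {
            return new String(Base64.getUrlDecoder().decode(haid)).split(";", 3);
        }

        public static void main(String[] args) {
            String haid = generate("cs-server-1:9108", "cs-server-2:9108", "84271");
            String[] parts = parse(haid);  // [main instance, backup instance, csid]
            System.out.println(parts[0] + " / " + parts[1] + " / " + parts[2]);
        }
    }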

    Version: 1.1.0

    CS Listener Architecture

    Listener Architecture#

In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We use the listener mode to achieve this, and use a heartbeat mechanism to poll and maintain the metadata consistency of the context information.

    Client registration itself, CSKey registration and CSKey update process#

    The main process is as follows:

1. Registration operation: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the csserver through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKeys value to all registered clients through the heartbeat mechanism. If there is a client's heartbeat timeout, remove the client.
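The bookkeeping behind this registration/update/heartbeat cycle can be sketched as below; the class is a hypothetical simplification, not the actual ContextKeyCallbackEngine:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Queue;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentLinkedQueue;

    class CallbackEngineSketch {
        private final Map<String, Set<String>> watched = new ConcurrentHashMap<>();   // client -> watched CSKeys
        private final Map<String, Queue<String>> pending = new ConcurrentHashMap<>(); // client -> updated CSKeys
        private final Map<String, Long> lastBeat = new ConcurrentHashMap<>();
        private static final long TIMEOUT_MS = 30_000;

        // Registration: remember the client and the CSKeys it wants to monitor.
        void register(String client, Set<String> csKeys) {
            watched.put(client, csKeys);
            pending.put(client, new ConcurrentLinkedQueue<>());
            lastBeat.put(client, System.currentTimeMillis());
        }

        // Update: when ListenerBus delivers a CSKey update, fan it out to interested clients.
        void onKeyUpdated(String csKey) {
            watched.forEach((client, keys) -> {
                Queue<String> q = pending.get(client);
                if (keys.contains(csKey) && q != null) q.add(csKey);
            });
        }

        // Heartbeat: evict timed-out clients, then drain and return this client's updates.
        List<String> heartbeat(String client) {
            long now = System.currentTimeMillis();
            lastBeat.entrySet().removeIf(e -> {
                boolean expired = now - e.getValue() > TIMEOUT_MS;
                if (expired) { watched.remove(e.getKey()); pending.remove(e.getKey()); }
                return expired;
            });
            lastBeat.put(client, now);
            List<String> updates = new ArrayList<>();
            Queue<String> q = pending.get(client);
            if (q != null) for (String k; (k = q.poll()) != null; ) updates.add(k);
            return updates;
        }
    }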

Listener UML class diagram#

Interface: ListenerManager

External: Provides a ListenerBus for event delivery.

Internal: Provides a callback engine for specific event registration, access, update, and heartbeat processing logic.

    Listener callbackengine timing diagram#

    Version: 1.1.0

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

3. Execution module: Filter out the results that match the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. The query conditions can be used to form more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

3. Support logical operations of or, and, and not

  1. Unary operation UnaryContextSearchCondition: supports the logical operation of a single parameter, such as NotContextSearchCondition

  2. Binary operation BinaryContextSearchCondition: supports the logical operation of two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

  3. Each logical operation corresponds to an implementation class of the above subclass

4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

2. Support logical operations between Conditions that return new Conditions: And, Or and Not (considering the condition1.or(condition2) form, the top-level interface of Condition is required to define the logical operation methods; see the sketch after this list)

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
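Point 2 above can be sketched with a tiny composable interface; this is a hypothetical simplification over plain predicates, not the actual ContextSearchCondition hierarchy:

    import java.util.function.Predicate;

    // Defining and/or/not on the top-level interface lets conditions chain fluently.
    interface Condition<T> extends Predicate<T> {
        default Condition<T> and(Condition<T> other) { return v -> test(v) && other.test(v); }
        default Condition<T> or(Condition<T> other)  { return v -> test(v) || other.test(v); }
        default Condition<T> not()                   { return v -> !test(v); }
    }

    class ConditionDemo {
        public static void main(String[] args) {
            Condition<String> contains = s -> s.contains("table");  // cf. ContainsContextSearchCondition
            Condition<String> regex = s -> s.matches(".*_log$");    // cf. RegexContextSearchCondition
            // condition1.or(condition2) style composition, then negated
            Condition<String> combined = contains.or(regex).not();
            System.out.println(combined.test("user_log"));  // false: "user_log" matches the regex
        }
    }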

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method is based on the cost of the Condition to optimize the Condition and convert it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the exchange rules of the logical operations, the left and right order of the child nodes of a node in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
3. For each tree, the cost is calculated from the leaf nodes and accumulated up to the root node, giving the final cost of the tree; finally, the tree with the smallest cost is chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left node and 0.5 for the right node, and how to adjust this will be considered later. (The reason for assigning a weight is that in some cases the condition on the left can already determine whether the entire combinatorial logic matches, so the condition on the right does not have to be executed in all cases, and its actual cost needs to be reduced by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

Taking tree A and tree B as examples, calculate the costs of these two trees respectively, as shown in the figure below; the numbers in the nodes are Cost|Weight, assuming that the costs of the 5 simple conditions A, B, C, D and E are 10, 100, 50, 10, and 100. It can be concluded that the cost of tree B is less than that of tree A, so tree B is the better plan. (A toy calculation of this rule is sketched at the end of this section.)

4. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted according to the situation during implementation)

2. According to the efficiency of historical queries, measured as throughput per unit time, the real-time Cost is obtained by continuous adjustment on the basis of the initial Cost.
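As a toy calculation of the cost rule above (hypothetical classes; the left-child weight is fixed at 1 and the right-child weight at 0.5, as described):

    class CostNode {
        final double cost;           // leaf cost from CostCalculator; unused for inner nodes
        final CostNode left, right;  // null for leaf nodes

        CostNode(double cost) { this.cost = cost; this.left = this.right = null; }
        CostNode(CostNode left, CostNode right) { this.cost = 0; this.left = left; this.right = right; }

        // Cost of an inner node = sum of child Cost x Weight (1 on the left, 0.5 on the right).
        double totalCost() {
            if (left == null) return cost;
            return left.totalCost() * 1.0 + right.totalCost() * 0.5;
        }

        public static void main(String[] args) {
            // (A op B) with leaf costs 10 and 100: 10 * 1 + 100 * 0.5 = 60
            CostNode tree = new CostNode(new CostNode(10), new CostNode(100));
            System.out.println(tree.totalCost());  // 60.0
        }
    }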

    Version: 1.1.0

    Data Source Management Service Architecture

    Background#

Exchangis 0.x and Linkis 0.x in earlier versions both have integrated data source modules. In order to reuse data source management capabilities, Linkis reconstructs the data source module based on linkis-datasource (refer to related documents) and splits data source management into a data source management service and a metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

1) Unified startup and deployment with Linkis services, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

2) Provides graphical-interface management services through Linkis Web. The interface provides management services such as creating a data source, data source query, data source update, connectivity test, and so on;

3) The service is stateless and supports multi-instance deployment, making the service highly available. When the system is deployed, multiple instances can be deployed; each instance provides services independently without interfering with the others, and all information is stored in the database for sharing.

4) Provides full life cycle management of data sources, including creation, query, update, test, and expiration management.

5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

6) The Restful interface provides the functions; a detailed list: data source type query, data source detailed information query, data source information query based on version, data source version query, get data source parameter list, multi-dimensional data source search, get data source environment query and update, add data source, data source parameter configuration, data source expiration setting, data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

1. The service is registered with the Linkis-Eureka-Service and managed in a unified manner with other Linkis microservices. The client can access the data source management service through the Linkis-GateWay-Service, using the service name data-source-manager.

2. The interface layer serves other applications through the Restful interface, providing addition, deletion and modification of data sources and data source environments, data source link and dual-link tests, and data source version management and expiration operations;

3. The Service layer is mainly for the service management of the database and the material library, and permanently retains the relevant information of the data source;

4. The link test of the data source is done through the linkis metastore server service, which currently supports mysql/es/kafka/hive.

    Core Process#

1. To create a new data source: first, the requesting user is obtained to determine whether the user is valid. Next, the relevant field information of the data source is verified: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database, and the data source ID is returned.

2. To update a data source: first, the requesting user is obtained to determine whether the user is valid. Next, the relevant field information of the new data source is verified: the data source name and data source type cannot be empty. Whether the data source exists is confirmed according to the data source ID; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission for the data source: only the administrator or the owner of the data source may update it. With permission, the data source is updated and the data source ID is returned.

3. To update the data source parameters: first, the requesting user is obtained to determine whether the user is valid; the detailed data source information is obtained according to the passed-in data source ID, and it is then determined whether the user is the owner of the data source or an administrator. If so, the modified parameters are further verified; after passing, the parameters are updated and the versionId is returned.

    Entity Object#

Class Name | Description
DataSourceType | Indicates the type of data source
DataSourceParamKeyDefinition | Declares data source property configuration definitions
DataSource | Data source object entity class, including permission tags and attribute configuration definitions
DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
DataSourceParameter | Data source specific parameter configuration
DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

Table: linkis_ps_dm_datasource <--> Object: DataSource

Serial Number | Column | Description
1  | id                   | Data source ID
2  | datasource_name      | Data source name
3  | datasource_desc      | Data source detailed description
4  | datasource_type_id   | Data source type ID
5  | create_identify      | Create identify
6  | create_system        | System that created the data source
7  | parameter            | Data source parameters
8  | create_time          | Data source creation time
9  | modify_time          | Data source modification time
10 | create_user          | Data source create user
11 | modify_user          | Data source modify user
12 | labels               | Data source label
13 | version_id           | Data source version ID
14 | expire               | Whether the data source is expired
15 | published_version_id | Data source published version number

Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

Serial Number | Column | Description
1 | id          | Data source type ID
2 | name        | Data source type name
3 | description | Data source type description
4 | option      | Type of data source
5 | classifier  | Data source type classifier
6 | icon        | Data source image display path
7 | layers      | Data source type hierarchy

Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

Serial Number | Column | Description
1 | id                 | Data source environment ID
2 | env_name           | Data source environment name
3 | env_desc           | Data source environment description
4 | datasource_type_id | Data source type ID
5 | parameter          | Data source environment parameters
6 | create_time        | Data source environment creation time
7 | create_user        | Data source environment create user
8 | modify_time        | Data source modification time
9 | modify_user        | Data source modify user

Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

Serial Number | Column | Description
1  | id                  | Key-value type ID
2  | data_source_type_id | Data source type ID
3  | key                 | Data source parameter key
4  | name                | Data source parameter name
5  | default_value       | Data source parameter default value
6  | value_type          | Data source parameter type
7  | scope               | Data source parameter scope
8  | require             | Whether the data source parameter is required
9  | description         | Data source parameter description
10 | value_regex         | Regex of the data source parameter
11 | ref_id              | Data source parameter association ID
12 | ref_value           | Data source parameter associated value
13 | data_source         | Data source
14 | update_time         | Update time
15 | create_time         | Create time

Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

Serial Number | Column | Description
1 | version_id    | Data source version ID
2 | datasource_id | Data source ID
3 | parameter     | The version parameter of the data source
4 | comment       | Comment
5 | create_time   | Create time
6 | create_user   | Create user
    Version: 1.1.0

    Data Source Management Service Architecture

    Background#

Exchangis 0.x and Linkis 0.x in earlier versions both have integrated data source modules. In order to reuse data source management capabilities, Linkis reconstructs the data source module based on linkis-datasource (refer to related documents) and splits data source management into a data source management service and a metadata management service.

    This article mainly involves the MetaData Manager Server data source management service, which provides the following functions:

1) Unified startup and deployment with Linkis services, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

2) The service is stateless and supports multi-instance deployment, making the service highly available. When the system is deployed, multiple instances can be deployed; each instance provides services independently without interfering with the others, and all information is stored in the database for sharing.

3) Provides full life cycle management of data sources, including creation, query, update, test, and expiration management.

4) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

5) The Restful interface provides the functions; a detailed list: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

1. The service is registered with the Linkis-Eureka-Service and managed in a unified manner with other Linkis microservices. The client can access the metadata management service through the Linkis-GateWay-Service, using the service name metamanager.

2. The interface layer provides database/table/partition information query to other applications through the Restful interface;

3. In the Service layer, the data source type is obtained from the data source management service by the data source ID, and the concretely supported service is obtained by type. The services supported in the first phase are mysql/es/kafka/hive;

    Core Process#

1. The client passes in the specified data source ID and obtains information through the restful interface. For example, to query the database list for data source ID 1, the url is http://<meta-server-url>/metadatamanager/dbs/1;

2. According to the data source ID, the data source type is obtained from the data source service <data-source-manager> through an RPC call;

3. According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned;

    Version: 1.1.0

PublicEnhancementService (PS) architecture design

PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for other microservice modules.

Introduction to the second-level modules:

    BML material library#

    It is the linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store class libraries that need to be used when the engine runs.

Core Class | Core Function
UploadService   | Provide resource upload service
DownloadService | Provide resource download service
ResourceManager | Provides a unified management entry for uploading and downloading resources
VersionManager  | Provides resource version marking and version management functions
ProjectManager  | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

Core Class | Core Function
CategoryService      | Provides management services for application and engine catalogs
ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

Core Class | Core Function
ContextCacheService | Provides a cache service for context information
ContextClient       | Provides the ability for other microservices to interact with the CSServer group
ContextHAManager    | Provide high-availability capabilities for ContextService
ListenerManager     | The ability to provide a message bus
ContextSearch       | Provides query entry
ContextService      | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

Core Class | Core Function
datasource-server | Provide the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

Core Class | Core Function
InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

Jobhistory provides users with functions related to querying historical Linkis tasks, progress, and log display, and provides a unified historical task view for administrators.

Core Class | Core Function
JobHistoryQueryService | Provide historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

Core Class | Core Function
VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

Core Class | Core Function
UDFService | Provide user-defined function service
The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

• Provides the label analysis module, which can parse a user's request into a set of labels.

• Provides node label management, mainly the CRUD capability for node labels, and label resource management to manage the resources of certain labels, recording the maximum resource, minimum resource and used resource of a label.

wds.linkis.engineconn.plugin.loader.store.path is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by Maven into this directory, for example, place it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

${LINKIS_HOME}/lib/linkis-engineconn-plugins:
└── hive
    └── dist
    └── plugin
└── spark
    └── dist
    └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

## dist directory
${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
└── hive
    └── dist
└── spark
    └── dist

## plugin directory
${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
└── hive
    └── plugin
└── spark
    └── plugin

    2.2 Configuration modification of management console (optional)#

The configuration of the Linkis 1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the following tables:

linkis_configuration_config_key: Insert the keys and default values of the configuration parameters of the engine
linkis_manager_label: Insert the engine label, such as hive-1.2.1
linkis_configuration_category: Insert the catalog relationship of the engine
linkis_configuration_config_value: Insert the configuration that the engine needs to display

If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it.

    2.3 Engine refresh#

1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 can load the engine without shutting down the server: just send a request to the linkis-engineconn-plugin-server service through the restful interface, i.e. the actual IP and port where the service is deployed. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (see the sketch after this list).

    2. Restart refresh: the engine catalog can be forced to refresh by restarting

### cd to the sbin directory, restart linkis-engineconn-plugin-server
cd /Linkis1.0.0/sbin
## Execute linkis-daemon script
sh linkis-daemon.sh restart linkis-engine-plugin-server

3. Check whether the engine refresh is successful: if you encounter problems during the refresh process and need to confirm whether the refresh succeeded, you can check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time at which the refresh was triggered.
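The refresh request in point 1 can be issued from any HTTP client. Below is a minimal sketch using Java 11's built-in HttpClient; the IP and port are placeholders for the actual deployment address of linkis-engineconn-plugin-server, and any authentication headers your gateway requires are omitted:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class RefreshEngines {
        public static void main(String[] args) throws Exception {
            // POST {"method":"/enginePlugin/engineConn/refreshAll"} to the rpc endpoint
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9103/api/rest_j/v1/rpc/receiveAndReply"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(
                            "{\"method\":\"/enginePlugin/engineConn/refreshAll\"}"))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.statusCode() + " " + response.body());
        }
    }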

    Version: 1.1.0

    Installation directory structure

The directory structure of Linkis 1.0 is very different from that of the 0.X version. In 0.X, each microservice has its own independent root directory. The main advantage of this directory structure is that it is easy to distinguish microservices and manage each microservice individually, but there are some obvious problems:

    1. The microservice catalog is too complicated and it is not convenient to switch catalog management
    2. There is no unified startup script, which makes it more troublesome to start and stop microservices
    3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

Therefore, in Linkis 1.0 we have greatly optimized and adjusted the installation directory structure: reducing the number of microservice directories, reducing duplicated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

1. The bin folder is no longer provided for each microservice; it is shared by all microservices.

The bin folder becomes the installation directory, which is mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click starting and stopping of Linkis, and provides independent starting and stopping of each microservice by changing parameters.

2. A separate conf directory is no longer provided for each microservice; it is shared by all microservices.

The conf folder contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, the special configuration of each microservice, which under normal circumstances users do not need to change themselves.

3. The lib folder is no longer provided for each microservice; it is shared by all microservices.

The lib folder also contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each microservice.

4. The log directory is no longer provided for each microservice; it is shared by all microservices.

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

├── bin ── installation directory
│   ├── checkEnv.sh ── Environment variable detection
│   ├── checkServices.sh ── Microservice status check
│   ├── common.sh ── Some public shell functions
│   ├── install-io.sh ── Used for dependency replacement during installation
│   └── install.sh ── Main script of Linkis installation
├── conf ── configuration directory
│   ├── application-eureka.yml
│   ├── application-linkis.yml ── Microservice general yml
│   ├── linkis-cg-engineconnmanager-io.properties
│   ├── linkis-cg-engineconnmanager.properties
│   ├── linkis-cg-engineplugin.properties
│   ├── linkis-cg-entrance.properties
│   ├── linkis-cg-linkismanager.properties
│   ├── linkis-computation-governance
│   │   └── linkis-client
│   │       └── linkis-cli
│   │           ├── linkis-cli.properties
│   │           └── log4j2.xml
│   ├── linkis-env.sh ── linkis environment properties
│   ├── linkis-et-validator.properties
│   ├── linkis-mg-gateway.properties
│   ├── linkis.properties ── linkis global properties
│   ├── linkis-ps-bml.properties
│   ├── linkis-ps-cs.properties
│   ├── linkis-ps-datasource.properties
│   ├── linkis-ps-publicservice.properties
│   ├── log4j2.xml
│   ├── proxy.properties (optional)
│   └── token.properties (optional)
├── db ── database DML and DDL file directory
│   ├── linkis_ddl.sql ── Database table definition SQL
│   ├── linkis_dml.sql ── Database table initialization SQL
│   └── module ── Contains DML and DDL files of each microservice
├── lib ── lib directory
│   ├── linkis-commons ── Common dependency package
│   ├── linkis-computation-governance ── The lib directory of the computation governance module
│   ├── linkis-engineconn-plugins ── lib directory of all EngineConnPlugins
│   ├── linkis-public-enhancements ── lib directory of public enhancement services
│   └── linkis-spring-cloud-services ── SpringCloud lib directory
├── logs ── log directory
│   ├── linkis-cg-engineconnmanager-gc.log
│   ├── linkis-cg-engineconnmanager.log
│   ├── linkis-cg-engineconnmanager.out
│   ├── linkis-cg-engineplugin-gc.log
│   ├── linkis-cg-engineplugin.log
│   ├── linkis-cg-engineplugin.out
│   ├── linkis-cg-entrance-gc.log
│   ├── linkis-cg-entrance.log
│   ├── linkis-cg-entrance.out
│   ├── linkis-cg-linkismanager-gc.log
│   ├── linkis-cg-linkismanager.log
│   ├── linkis-cg-linkismanager.out
│   ├── linkis-et-validator-gc.log
│   ├── linkis-et-validator.log
│   ├── linkis-et-validator.out
│   ├── linkis-mg-eureka-gc.log
│   ├── linkis-mg-eureka.log
│   ├── linkis-mg-eureka.out
│   ├── linkis-mg-gateway-gc.log
│   ├── linkis-mg-gateway.log
│   ├── linkis-mg-gateway.out
│   ├── linkis-ps-bml-gc.log
│   ├── linkis-ps-bml.log
│   ├── linkis-ps-bml.out
│   ├── linkis-ps-cs-gc.log
│   ├── linkis-ps-cs.log
│   ├── linkis-ps-cs.out
│   ├── linkis-ps-datasource-gc.log
│   ├── linkis-ps-datasource.log
│   ├── linkis-ps-datasource.out
│   ├── linkis-ps-publicservice-gc.log
│   ├── linkis-ps-publicservice.log
│   └── linkis-ps-publicservice.out
├── pid ── Process IDs of all microservices
│   ├── linkis_cg-engineconnmanager.pid ── EngineConnManager microservice
│   ├── linkis_cg-engineconnplugin.pid ── EngineConnPlugin microservice
│   ├── linkis_cg-entrance.pid ── Engine entrance microservice
│   ├── linkis_cg-linkismanager.pid ── linkis manager microservice
│   ├── linkis_mg-eureka.pid ── eureka microservice
│   ├── linkis_mg-gateway.pid ── gateway microservice
│   ├── linkis_ps-bml.pid ── material library microservice
│   ├── linkis_ps-cs.pid ── Context microservice
│   ├── linkis_ps-datasource.pid ── Data source microservice
│   └── linkis_ps-publicservice.pid ── public microservice
└── sbin ── microservice start and stop script directory
    ├── ext ── Start and stop script directory of each microservice
    ├── linkis-daemon.sh ── Quickly start, stop, or restart a single microservice
    ├── linkis-start-all.sh ── Start all microservices with one click
    └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For details on project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ── engine management service
    ├── linkis-cg-engineplugin ── EngineConnPlugin management service
    ├── linkis-cg-entrance ── computation governance entrance service
    ├── linkis-cg-linkismanager ── computation governance management service
    ├── linkis-mg-eureka ── microservice registry service
    ├── linkis-mg-gateway ── Linkis gateway service
    ├── linkis-ps-bml ── material library service
    ├── linkis-ps-cs ── context service
    ├── linkis-ps-datasource ── data source service
    └── linkis-ps-publicservice ── public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, to start or stop a single microservice you had to enter the bin directory of that microservice and execute its start/stop script. With many microservices this was troublesome and added a lot of extra directory switching. Linkis1.0 places all scripts related to starting and stopping microservices in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (the service name needs to be removed from the Linkis prefix, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
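
    If you need to check several services at once, the per-service commands above compose naturally in a small loop (a sketch; the service-name list assumes the default deployment and may differ in yours):

    for svc in mg-eureka mg-gateway cg-linkismanager cg-engineconnmanager cg-engineplugin cg-entrance ps-publicservice; do
        sh linkis-daemon.sh status $svc   # same semantics as running each command by hand
    done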
    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html b/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html index 1dda95ebbf5..d14c5114418 100644 --- a/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh
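
    To confirm that the SkyWalking agent was actually attached to the services, you can check the running Java processes for the agent jar (a sketch):

    ps -ef | grep skywalking-agent | grep -v grep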

    4. Result display#

    The UI port of Linkis starts at port 8080 by default. After Linkis is started with SkyWalking enabled, open the SkyWalking UI; if you can see a picture like the one below, the integration succeeded.

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/quick_deploy/index.html b/docs/1.1.0/deployment/quick_deploy/index.html index 7b990acd2d1..2430d84ebf4 100644 --- a/docs/1.1.0/deployment/quick_deploy/index.html +++ b/docs/1.1.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # Set the connection information of the database:
    # ip address, database name, username and port.
    # Mainly used to store the user's customized variables, configuration parameters, UDFs and small functions,
    # and to provide the underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=
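
    A filled-in sketch for reference (all values below are placeholders, not defaults; use your own database):

    MYSQL_HOST=127.0.0.1
    MYSQL_PORT=3306
    MYSQL_DB=linkis
    MYSQL_USER=linkis
    MYSQL_PASSWORD=your_password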

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might run the install.sh script repeatedly, which would clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation succeeded#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0, which does not meet the license policy of the Apache open source agreement, the official Apache deployment package does not ship the mysql-connector-java-x.x.x.jar dependency by default starting from version 1.0.3. You need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, taking version 5.1.49 as an example, use this download link: https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/
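
    The download and the two copies can also be scripted (a sketch; the version variable and ${LINKIS_HOME} are assumptions to adjust for your environment):

    V=5.1.49   # assumed driver version
    wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/${V}/mysql-connector-java-${V}.jar
    for d in linkis-spring-cloud-services/linkis-mg-gateway linkis-commons/public-module; do
        cp mysql-connector-java-${V}.jar ${LINKIS_HOME}/lib/${d}/
    done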

    5. Linkis quick startup#

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on Eureka; here is how to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303
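
    You can also query the registry from the command line via Eureka's standard REST endpoint (a sketch assuming the default address):

    curl -s http://127.0.0.1:20303/eureka/apps | grep -o '<name>[^<]*</name>'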

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html index c1929642be7..0c501d20c62 100644 --- a/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Source Code Directory Structure

    A description of the hierarchical structure of the Linkis source code directories. If you want to learn more about the Linkis modules, please check the related Linkis architecture design documents.

    |-- assembly-combined-package // module that compiles the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons // core abstractions, contains all common modules
    |        |-- linkis-common // common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient // Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis // SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc // RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler // general scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance // computation governance services
    |        |-- linkis-client // Java SDK, users can directly access Linkis through the Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance // general low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements // public enhancement services
    |        |-- linkis-bml // material library
    |        |-- linkis-context-service // unified context
    |        |-- linkis-datasource // data source service
    |        |-- linkis-publicservice // public service
    |-- linkis-spring-cloud-services // microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway // gateway
    |-- db // database information
    |-- license-doc // license details
    |        |-- license // the license of the backend project
    |        |-- ui-license // license of the linkis management console
    |-- tool // tool scripts
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web // management console code of linkis
    |
    |-- scalastyle-config.xml // Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE // LICENSE of the project source code
    |-- LICENSE-binary // LICENSE of the binary package
    |-- LICENSE-binary-ui // LICENSE of the front-end compiled package
    |-- NOTICE // NOTICE of the project source code
    |-- NOTICE-binary // NOTICE of the binary package
    |-- NOTICE-binary-ui // NOTICE of the front-end binary package
    |-- licenses-binary // the detailed dependency license files of the binary package
    |-- licenses-binary-ui // the detailed dependency license files of the front-end compiled package
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/start_metadatasource/index.html b/docs/1.1.0/deployment/start_metadatasource/index.html index c9769e89fcd..b73ccf4ad45 100644 --- a/docs/1.1.0/deployment/start_metadatasource/index.html +++ b/docs/1.1.0/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/deployment/web_install/index.html b/docs/1.1.0/deployment/web_install/index.html index d07d57b7878..e24ac53eda0 100644 --- a/docs/1.1.0/deployment/web_install/index.html +++ b/docs/1.1.0/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service: sudo systemctl restart nginx

    3. After execution, you can access it directly in a browser such as Chrome: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
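
    After changing either value, validate the configuration and reload nginx so the change takes effect (a sketch):

    sudo nginx -t && sudo systemctl reload nginx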
    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_compile_and_package/index.html b/docs/1.1.0/development/linkis_compile_and_package/index.html index caa0a79b38c..a6237321527 100644 --- a/docs/1.1.0/development/linkis_compile_and_package/index.html +++ b/docs/1.1.0/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- replace just this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine
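
    For reference, rebuilding a single engine after such a change is usually a plain Maven build from the engine's directory (a sketch; -DskipTests is an assumption, and the compile guide referenced above is authoritative):

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    mvn clean install -DskipTests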

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_config/index.html b/docs/1.1.0/development/linkis_config/index.html index ca7f61d8530..7bdb573d1b3 100644 --- a/docs/1.1.0/development/linkis_config/index.html +++ b/docs/1.1.0/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_debug/index.html b/docs/1.1.0/development/linkis_debug/index.html index 730d47c2069..305eabd518d 100644 --- a/docs/1.1.0/development/linkis_debug/index.html +++ b/docs/1.1.0/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -44,7 +44,7 @@ [linkis-cg-engineplugin]nohup java -DserviceName=linkis-cg-engineplugin -Xmx512M -XX:+UseG1GC -Xloggc:/data/LinkisInstallDir/logs/linkis-cg-engineplugin-gc.log -cp /data/LinkisInstallDir/conf/:/data/LinkisInstallDir /lib/linkis-commons/public-module/*:/data/LinkisInstallDir/lib/linkis-computation-governance/linkis-cg-engineplugin/* org.apache.linkis.engineplugin.server.LinkisEngineConnPluginServer 2>&1> /data /LinkisInstallDir/logs/linkis-cg-engineplugin.out &

    Remote debugging service steps#

    todo

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/linkis_debug_in_mac/index.html b/docs/1.1.0/development/linkis_debug_in_mac/index.html index 7638079f1c9..dffeaf72624 100644 --- a/docs/1.1.0/development/linkis_debug_in_mac/index.html +++ b/docs/1.1.0/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

    The two configurations here are mainly to specify the root directory of the engine storage, and the main purpose of specifying it as target/out is that after the engine-related code or configuration changes, the engineplugin service can be restarted directly to take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command that starts the engine process. On a Mac the current user generally needs to enter a password when using sudo, so passwordless sudo needs to be set up for the current user. The setting method is as follows:

    sudo chmod u-w /etc/sudoers
    sudo visudo
    # replace "#%admin ALL=(ALL) ALL" with "%admin ALL=(ALL) NOPASSWD: ALL", then save the file and exit
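
    An alternative sketch that avoids editing /etc/sudoers directly is a drop-in file (assumes /etc/sudoers.d is included by your sudoers configuration, as it is by default on macOS):

    echo "$(whoami) ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/$(whoami)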

    3.13 Service Testing#

    Make sure that the above services are all successfully started, and then test and submit the shell script job in postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

    Execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/1.1.0/development/new_engine_conn/index.html b/docs/1.1.0/development/new_engine_conn/index.html index 7be6c8182b0..3d755b507ca 100644 --- a/docs/1.1.0/development/new_engine_conn/index.html +++ b/docs/1.1.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -53,7 +53,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license and copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.1.0/development/web_build/index.html b/docs/1.1.0/development/web_build/index.html index 45283cef7bb..a20f510496e 100644 --- a/docs/1.1.0/development/web_build/index.html +++ b/docs/1.1.0/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the front end and back end of the project are developed separately, when running in a local browser, the browser needs to be set to allow cross-domain requests in order to access the back-end interface. For specific settings, please refer to solving the Chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/flink/index.html b/docs/1.1.0/engine_usage/flink/index.html index fae5128cd75..6363e64e3fd 100644 --- a/docs/1.1.0/engine_usage/flink/index.html +++ b/docs/1.1.0/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    In Linkis1.0 engine selection is done through tags, so we need to insert the tag data into our database; the way of inserting is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation operation, queue setting#

    The Flink engine of Linkis 1.0 is started via flink on yarn, so the queue used by the user needs to be specified. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Prepare knowledge, two ways to use Flink engine#

    Linkis' Flink engine has two execution modes. One is the ComputationEngineConn mode, mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of the Flink code; the other is the OnceEngineConn mode, mainly used to start a streaming application in the Streamis production center.

    Prepare knowledge, Connector plug-in of FlinkSQL#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the connector plug-in jar packages into the lib of the flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine.

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    FlinkSQL writing example, taking binlog as an example

    CREATE TABLE mysql_binlog (
        id INT NOT NULL,
        name STRING,
        age INT
    ) WITH (
        'connector' = 'mysql-cdc',
        'hostname' = 'ip',
        'port' = 'port',
        'username' = 'username',
        'password' = 'password',
        'database-name' = 'dbname',
        'table-name' = 'tablename',
        'debezium.snapshot.locking.mode' = 'none'  -- recommended to add, otherwise the table will be locked
    );
    select * from mysql_binlog where id > 10;

    When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the open-result-set interface to display it.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn mode is used to formally start Flink streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. This mode can be called by other systems, such as Streamis. The use of the Client is also very simple: first create a new maven project, or introduce the following dependency into your project

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new scala test file and click Execute to complete the example: read from one binlog data source and insert into another mysql database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it that specifies the gateway address and api version of linkis, for example:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/
    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          // .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop")
          .addJobContent("runType", "sql")
          .addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/hive/index.html b/docs/1.1.0/engine_usage/hive/index.html index ec9299edf7d..7111acf2c04 100644 --- a/docs/1.1.0/engine_usage/hive/index.html +++ b/docs/1.1.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/jdbc/index.html b/docs/1.1.0/engine_usage/jdbc/index.html index 77a1c1dd989..91f8008b53c 100644 --- a/docs/1.1.0/engine_usage/jdbc/index.html +++ b/docs/1.1.0/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC driver, submit the SQL to the SQL server for execution, obtain the result set, and return it.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information; it is recommended that users manage the password and other sensitive information in encrypted form.

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/overview/index.html b/docs/1.1.0/engine_usage/overview/index.html index 9bb007d04c5..a98d3fc33c9 100644 --- a/docs/1.1.0/engine_usage/overview/index.html +++ b/docs/1.1.0/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Whether to support Scriptis | Whether to support workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support
    Flink | Support | Support

    2. Document structure#

    You can refer to the following documents for the related documents of the engines that have been accessed.

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/python/index.html b/docs/1.1.0/engine_usage/python/index.html index a0c13ca7481..5665b7ba24e 100644 --- a/docs/1.1.0/engine_usage/python/index.html +++ b/docs/1.1.0/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/shell/index.html b/docs/1.1.0/engine_usage/shell/index.html index df13b400595..ac26fa7dd92 100644 --- a/docs/1.1.0/engine_usage/shell/index.html +++ b/docs/1.1.0/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell is that the shell EngineConn starts a system process via Java's built-in ProcessBuilder, redirects the output of the process to the EngineConn, and writes it to the log.

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.1.0/engine_usage/spark/index.html b/docs/1.1.0/engine_usage/spark/index.html index 98be8113817..a2e9a5eacf3 100644 --- a/docs/1.1.0/engine_usage/spark/index.html +++ b/docs/1.1.0/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.1.0/introduction/index.html b/docs/1.1.0/introduction/index.html index de9f8e90f51..b0f75ffd984 100644 --- a/docs/1.1.0/introduction/index.html +++ b/docs/1.1.0/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.1.0/release/index.html b/docs/1.1.0/release/index.html index b4aa7ec1c0e..09d54367135 100644 --- a/docs/1.1.0/release/index.html +++ b/docs/1.1.0/release/index.html @@ -7,7 +7,7 @@ Version Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Version Overview

    Configuration Item#

    Module Name (Service Name) | Type | Parameter Name | Default Value | Description
    ps-metadatamanager | New | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-public-enhancements/linkis-ps-metadatamanager/service | Set the relative path to load the data source jar package, will be called by reflection
    ps-metadatamanager | New | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the expiration time for loading sub-services, after which the service will not be loaded
    ps-metadatamanager | New | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the name of the data source information service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.app.name | linkis-ps-metadatamanager | Service name for setting metadata information
    ps-metadatamanager | New | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Set the kerberos principle for the linkis-metadata hive service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.user | hadoop | Set the access user of the hive service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Set the kerberos krb5 path used by the hive service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Set the temporary path of kafka and hive
    ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Set the driver of the mysql service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Set the url format of the mysql service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Set the connection timeout for connecting to the mysql service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Set the socket timeout for opening the mysql service
    ps-metadatamanager | New | wds.linkis.server.mdm.service.temp.location | /tmp/keytab | Set the local temporary storage path of the service, mainly to store authentication files downloaded from the bml material service
    ps-data-source-manager | New | wds.linkis.server.dsm.auth.admin | hadoop | Permission authentication user for part of the datasourcemanager interfaces
    cg-engineconnmanager | Modified | wds.linkis.engineconn.max.free.time | 1h -> 0.5h | Maximum idle time of EngineConn changed from 1h to 0.5h

    DB Table Changes#

    For details, see the upgrade schema file db/upgrade/1.1.0_schema in the corresponding branch of the code repository (https://github.com/apache/incubator-linkis).

    - + \ No newline at end of file diff --git a/docs/1.1.0/tags/index.html b/docs/1.1.0/tags/index.html index 05d1f93ec71..f58aabcd02d 100644 --- a/docs/1.1.0/tags/index.html +++ b/docs/1.1.0/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html b/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html index 7768a3181c1..4e5aa3551bd 100644 --- a/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.1.0/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file linkis.properties is provided in the conf directory to avoid the need for common configuration parameters to be configured in multiple microservices at the same time. This document will list the parameters of Linkis1.0 in modules.

    Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

    The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.
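
    For example, a couple of the parameters documented below could be overridden in conf/linkis.properties like this (a sketch; the values are illustrative):

    wds.linkis.encoding=utf-8
    wds.linkis.test.mode=false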

    1.1 Global configurations#

    Parameter name | Default value | Description
    wds.linkis.encoding | utf-8 | Linkis default encoding format
    wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
    wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConn open remote debugging ports
    wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
    wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
    wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout

    1.2 LDAP configurations#

    Parameter name | Default value | Description
    wds.linkis.ldap.proxy.url | None | LDAP URL address
    wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
    wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.hadoop.root.user | hadoop | HDFS super user
    wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
    wds.linkis.keytab.enable | false | Whether to enable kerberos
    wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
    wds.linkis.keytab.host.enabled | false |
    wds.linkis.keytab.host | 127.0.0.1 |
    hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
    wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
    wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver
    wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
    wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for refresh (recommended default value)
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
    wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
    wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
    wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Sender consumption queue maximum buffer number

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version
    wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version
    wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version
    wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version
    wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version
    wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version
    wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
    wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job
    wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
    wds.linkis.default.requestApplication.name | IDE | The default submission system when the submission system is not specified
    wds.linkis.default.runType | sql | The default script type when the script type is not specified
    wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default
    wds.linkis.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere,com.webank | Real-time INFO-level logs that are not output to the client by default
    wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
    wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
    wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | When pushing Hive logs to the client, which logs are not filtered by default
    wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | When pushing Spark logs to the client, which logs are not filtered by default
    wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
    wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Shell default dangerous syntax
    wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
    wds.linkis.sql.default.limit | 5000 | SQL default maximum number of returned result set rows

    2.2 EngineConn configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path of job result sets
    wds.linkis.engine.resultSet.cache.max | 0k | When the size of a result set is below this value, EngineConn returns it to Entrance directly without writing it to disk
    wds.linkis.engine.default.limit | 5000 |
    wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code to EngineConn the lock will be released
    wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
    wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time

    2.3 EngineConnManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.ecm.memory.max | 80g | Maximum memory ECM can use to start EngineConn
    wds.linkis.ecm.cores.max | 50 | Maximum number of CPUs ECM can use to start EngineConn
    wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConn that can be started; generally recommended to be the same as wds.linkis.ecm.cores.max
    wds.linkis.ecm.protected.memory | 4g | ECM protected memory, i.e. the memory ECM uses to start EngineConn cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPUs of ECM; the meaning is the same as wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.engine.instances | 2 | Number of protected instances of ECM
    wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return the pid

    2.4 LinkisManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
    wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time for LinkisManager to reuse an existing EngineConn
    wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum polling times for LinkisManager to reuse an existing EngineConn
    wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types for which the user is not used as a reuse rule when LinkisManager reuses an existing EngineConn
    wds.linkis.rm.instance | 10 | The default maximum number of instances per user per engine
    wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores used per user in each engine's queue
    wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory used per user in each engine's queue
    wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched per user in each engine's queue

    3. Each engine configuration parameter#

    3.1 JDBC engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jdbc.default.limit | 5000 | The default maximum number of returned result set rows
    wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
    wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

    Parameter name | Default value | Description
    pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
    python.path | None | Specify an additional path for Python, which only accepts shared storage paths

    3.3 Spark engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for Scala and Python command interpreters
    PYSPARK_DRIVER_PYTHON | python | Python command path
    wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.bml.dws.version | v1 | Version number for Linkis Restful requests
    wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
    wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
    wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on hdfs

    4.2 Metadata configuration parameters#

    Parameter name | Default value | Description
    hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
    hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
    hive.meta.url | None | The URL of the HiveMetaStore database. If hive.config.dir is not configured, this value must be configured
    hive.meta.user | None | User of the HiveMetaStore database
    hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jobhistory.admin | None | The default Admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory
    wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
    wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | The first-level prefix after the user's HDFS root directory. The user's actual root directory is: ${hdfs.root.path}\${user}\${hdfsuserrootpath.suffix}
    wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the Client downloads a result set
    wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when a result set is downloaded as a CSV file
    wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when a result set is downloaded as an Excel file
    wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying filesystem (if the performance of your HDFS or Linux machine is low, it is recommended to increase this value appropriately)

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable the proxy-user mode; if enabled, the login user's requests are proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Proxy file refresh interval
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable the Token login mode; if enabled, access to Linkis in the form of tokens is allowed
    wds.linkis.gateway.conf.token.auth.config | token.properties | Token rule storage file
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Token file refresh interval
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable the SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If the SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

    From Version | Parameter name | Default value | Description
    v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-public-enhancements/linkis-ps-metadatamanager/service | Specify the relative path of the service to be loaded
    v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the service loading timeout; if exceeded, the service will not be loaded
    v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the service to get the data source
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Set the kerberos principle for the linkis-metadata hive service
    v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | Set the user for the linkis-metadata hive service
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Set the kerberos krb5 path for the linkis-metadata hive service
    v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Set the tmp location for the linkis-metadata hive and kafka services
    v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Set the driver for the hive-metadata mysql service
    v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Set the url format for the hive-metadata mysql service
    v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Set the mysql connection timeout for the hive-metadata mysql service
    v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Set the socket-open timeout for the hive-metadata mysql service
    - + \ No newline at end of file diff --git a/docs/1.1.0/tuning_and_troubleshooting/overview/index.html b/docs/1.1.0/tuning_and_troubleshooting/overview/index.html index 85c99bfe4bc..f51f4f2d811 100644 --- a/docs/1.1.0/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.1.0/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ The compatibility of the os version is the best, and some system versions may have command incompatibility. For example, the poor compatibility of yum in ubantu may cause yum-related errors in the installation and deployment. In addition, it is also recommended not to use windows as much as possible. Deploying linkis, currently no script is fully compatible with the .bat command.

  • Missing configuration item: There are two configuration files that need to be modified in linkis1.0 version, linkis-env.sh and db.sh

    The former contains the environment parameters that linkis needs to load during execution, and the latter contains the database information for the tables that linkis itself needs to store. Under normal circumstances, if a required configuration is missing, the error message will show an exception related to the missing key. For example, when db.sh does not contain the database configuration, an error such as "unknown mysql server host '-P'" will appear, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.
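
    When troubleshooting, a quick scan of a service's log for errors can save time (a sketch; the path assumes the logs layout above, and linkis-cg-entrance is just an example service):

    cd logs/linkis-computation-governance/linkis-cg-entrance
    grep -in "error\|exception" linkis-cg-entrance.log | tail -n 20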

    Under normal circumstances, when an error occurs while starting a microservice, you can cd into the corresponding service directory under logs and view the related log to troubleshoot the problem. The most frequently occurring problems can be divided into three categories:

    1. Port occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, you need to check whether the port of each microservice is occupied by another process before starting. If it is occupied, you need to change the corresponding microservice port in the conf/linkis-env.sh file (a quick port check is sketched after this list).

    2. Necessary configuration parameters are missing: For some microservices, certain user-defined parameters must be loaded before they can start normally. For example, the linkis-cg-engineplugin microservice loads the configuration related to wds.linkis.engineconn.* from conf/linkis.properties when it starts. If the user changes the Linkis path after installation and does not update this configuration accordingly, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: It is recommended that users follow the recommended system and application versions in the official documents as much as possible when deploying, and install the necessary system plug-ins, such as expect, yum, etc. An incompatible application version may cause errors related to that application. For example, the incompatibility of SQL statements in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation. You need to refer to the "Q\&A Problem Summary" or the deployment documentation to make the corresponding settings.
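    For the port-occupation case above, a quick check before starting a service (the port 9001 is illustrative; check each port configured in conf/linkis-env.sh):

        # Show which process, if any, is listening on the candidate port
        ss -tlnp | grep 9001 || echo "port 9001 is free"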

  • Report error during microservice execution period

    Errors during microservice execution are more complicated, and the specific situations encountered vary by environment, but the troubleshooting method is basically the same. Starting from the corresponding microservice error log directory, we can roughly divide them into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure ("insufficient resources, request engine failure"): When this type of error occurs, it is not necessarily due to insufficient resources, because the front end can only fetch logs after the Spring project has started; errors occurring before the engine starts cannot be fetched well. Three kinds of high-frequency problems have been found during actual use by internal test users:

      a. The engine cannot be created because there is no engine directory permission: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice. You need to enter the file to view the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: Since the engine directory has already been created, the log will be printed to the stdout file under the engine, and the engine path can be found as described in item c.

      c. Errors reported during engine execution: Each started engine is a microservice that is dynamically loaded and started at runtime. When the engine starts, if an error occurs, you need to find the engine's log in the corresponding startup user directory. The root path is the ENGINECONN_ROOT_PATH filled in linkis-env.sh before installation. If you need to modify the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties (a quick way to locate these engine logs is sketched after this list).
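    A quick way to locate recently written engine logs, assuming ENGINECONN_ROOT_PATH points at /appcom/tmp (an illustrative value; use the path from your linkis-env.sh):

        # List stdout files of engines started in the last two hours
        find /appcom/tmp -name stdout -mmin -120 2>/dev/null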

  Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved with the above process during installation and deployment, you can send the error messages to our community group. To help community partners and developers solve them efficiently, it is recommended that when you ask a question, you describe the problem phenomenon, the related log information, and the places you have already checked. If you think it may be an environmental problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis github homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate a problem, but compared to reviewing the documents, it requires a certain understanding of the source code structure. It is recommended that you review the detailed source-code structure of Linkis in the Linkis WIKI before remote debugging. After gaining a certain familiarity with the project's source code structure, you can refer to How to Debug Linkis.

- + \ No newline at end of file diff --git a/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html b/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html index bad65a24d9a..75b664abe3d 100644 --- a/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.1.0/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@
override def getOrCreateGroup(groupName: String): Group = {
  if (!groupNameToGroups.containsKey(groupName)) synchronized {
    val initCapacity = 100
    val maxCapacity = 100
    // other codes...
  }
}

    4. Resource settings related to task runtime#

    When submitting a task to run on Yarn, Yarn provides a configurable interface. As a highly scalable framework, Linkis also exposes configuration for setting task resources.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust it to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details such as the Hive and Yarn configuration, please refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 295a51610f5..9d210990070 100644 --- a/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.1.0/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';
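    To confirm the new columns exist after running the statements, a quick check (connection details are illustrative):

        mysql -h 127.0.0.1 -u linkis -p linkis \
          -e "DESC linkis_task;" | grep -E 'submit_user|label_json'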

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the public service-instanceLabel service depends on:
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on:
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/1.1.0/upgrade/upgrade_guide/index.html b/docs/1.1.0/upgrade/upgrade_guide/index.html index 1809b1d716d..31f10035139 100644 --- a/docs/1.1.0/upgrade/upgrade_guide/index.html +++ b/docs/1.1.0/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ Version upgrades above 1.0.3 | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

    #Example
    server {
        ......
        location dss/linkis {
            alias /appcom/Install/linkis-web-newversion/dist;   # static file directory
            index index.html index.html;
        }
        ......
    }

    Reload nginx configuration

    sudo nginx -s reload
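    You can then sanity-check that the new static files are being served (host and path are illustrative):

        curl -I http://127.0.0.1/dss/linkis/index.html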

    5.3 Notes#

    • After the management console is upgraded, the browser may still serve cached files; to verify the effect, it is best to clear the browser cache first
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/console_manual/index.html b/docs/1.1.0/user_guide/console_manual/index.html index e304d9314b0..c74effa5ea9 100644 --- a/docs/1.1.0/user_guide/console_manual/index.html +++ b/docs/1.1.0/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.0

    Console User Manual

    Linkis1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, and so on, simplifying user development and management work.

    1. Structure of Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrators of the Linkis computing management console can be configured through the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface shows the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.

    ./media/image4.png

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    ./media/image5.png

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

    The user can expand all configuration information in the directory by clicking the application type at the top, then select the engine type within the application, modify the configuration information, and click "Save" for it to take effect.

    Editing the catalog and creating new application types are only visible to the administrator. Click the edit button to delete an existing application and engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    ./media/image9.png

    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view ECM status information, modify ECM label information, modify ECM status information, and query all engine information under each ECM. It is visible only to the administrator; how administrators are configured is described earlier in this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface shows all microservice information under Linkis, and is only visible to the administrator. Linkis's own microservices can be viewed by clicking the Eureka registry; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/how_to_use/index.html b/docs/1.1.0/user_guide/how_to_use/index.html index bfa5b68962a..0b92cad0d44 100644 --- a/docs/1.1.0/user_guide/how_to_use/index.html +++ b/docs/1.1.0/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

- + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/linkis-datasource-client/index.html b/docs/1.1.0/user_guide/linkis-datasource-client/index.html index b2d7daef2d4..73605c11a5c 100644 --- a/docs/1.1.0/user_guide/linkis-datasource-client/index.html +++ b/docs/1.1.0/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK | Apache Linkis - + @@ -31,7 +31,7 @@
def testMetadataGetDatabases(client: LinkisMetaDataRemoteClient): Unit = {
  client.getDatabases(MetadataGetDatabasesAction.builder()
    .setUser("hadoop")
    .setDataSourceId(9L)
    .setUser("hadoop")
    .setSystem("client")
    .build()
  ).getDbs
}
}
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/linkiscli_manual/index.html b/docs/1.1.0/user_guide/linkiscli_manual/index.html index 005e647f512..204639e2940 100644 --- a/docs/1.1.0/user_guide/linkiscli_manual/index.html +++ b/docs/1.1.0/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction Parameters > Key in Instruction Map Type Parameters > User Configuration > Default Configuration

    Example:

    Configure engine startup parameters:

       wds.linkis.client.param.conf.spark.executor.instances=3
       wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    6. Output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will output the result sets to files, creating one file per result set. The output naming format is as follows:

        task-[taskId]-result-[idx].txt    

    E.g.:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
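    Putting it together, a sketch of a linkis-cli invocation that writes result sets to a directory (engine version and SQL are illustrative):

        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql \
          -code "select * from default.demo limit 10" \
          -outPath /tmp/linkis-out
        # expected output files: /tmp/linkis-out/task-<taskId>-result-1.txt, ...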
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/overview/index.html b/docs/1.1.0/user_guide/overview/index.html index 37004bef05b..37d3a64d7ba 100644 --- a/docs/1.1.0/user_guide/overview/index.html +++ b/docs/1.1.0/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.0/user_guide/sdk_manual/index.html b/docs/1.1.0/user_guide/sdk_manual/index.html index 155afab3825..c93a9fc2ba9 100644 --- a/docs/1.1.0/user_guide/sdk_manual/index.html +++ b/docs/1.1.0/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/data-source-manager-api/index.html b/docs/1.1.1/api/http/data-source-manager-api/index.html index 4bd3d7bcc8e..82b1bcf2f25 100644 --- a/docs/1.1.1/api/http/data-source-manager-api/index.html +++ b/docs/1.1.1/api/http/data-source-manager-api/index.html @@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis - + @@ -20,7 +20,7 @@ Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | integer(int64) | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | integer(int64) | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | integer(int64) | |
    | version | version | path | true | integer(int64) | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSource | dataSource | body | true | DataSource | DataSource |
    | connectParams | | | false | object | |
    | createIdentify | | | false | string | |
    | createSystem | | | false | string | |
    | createTime | | | false | string(date-time) | |
    | createUser | | | false | string | |
    | dataSourceDesc | | | false | string | |
    | dataSourceEnv | | | false | DataSourceEnv | DataSourceEnv |
    | connectParams | | | false | object | |
    | createTime | | | false | string | |
    | createUser | | | false | string | |
    | dataSourceType | | | false | DataSourceType | DataSourceType |
    | classifier | | | false | string | |
    | description | | | false | string | |
    | icon | | | false | string | |
    | id | | | false | string | |
    | layers | | | false | integer | |
    | name | | | false | string | |
    | option | | | false | string | |
    | dataSourceTypeId | | | false | integer | |
    | envDesc | | | false | string | |
    | envName | | | false | string | |
    | id | | | false | integer | |
    | modifyTime | | | false | string | |
    | modifyUser | | | false | string | |
    | dataSourceEnvId | | | false | integer(int64) | |
    | dataSourceName | | | false | string | |
    | dataSourceType | | | false | DataSourceType | DataSourceType |
    | classifier | | | false | string | |
    | description | | | false | string | |
    | icon | | | false | string | |
    | id | | | false | string | |
    | layers | | | false | integer | |
    | name | | | false | string | |
    | option | | | false | string | |
    | dataSourceTypeId | | | false | integer(int64) | |
    | expire | | | false | boolean | |
    | id | | | false | integer(int64) | |
    | labels | | | false | string | |
    | modifyTime | | | false | string(date-time) | |
    | modifyUser | | | false | string | |
    | publishedVersionId | | | false | integer(int64) | |
    | versionId | | | false | integer(int64) | |
    | versions | | | false | array | DatasourceVersion |
    | comment | | | false | string | |
    | connectParams | | | false | object | |
    | createTime | | | false | string | |
    | createUser | | | false | string | |
    | datasourceId | | | false | integer | |
    | parameter | | | false | string | |
    | versionId | | | false | integer | |

    Response parameters:

    | parameter name | parameter description | type | schema |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html b/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html index 083900dc463..8e7b5e13704 100644 --- a/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html +++ b/docs/1.1.1/api/http/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ Engine Material Refresh Interface | Apache Linkis - + @@ -16,7 +16,7 @@ none

    Response parameters:

    | parameter name | parameter description | type | schema |
    | data | | object | |
    | message | | string | |
    | method | | string | |
    | status | | integer(int32) | integer(int32) |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "msg": "Refresh successfully"    }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/metadatamanager-api/index.html b/docs/1.1.1/api/http/metadatamanager-api/index.html index cdb6190494e..828d4933e3c 100644 --- a/docs/1.1.1/api/http/metadatamanager-api/index.html +++ b/docs/1.1.1/api/http/metadatamanager-api/index.html @@ -7,7 +7,7 @@ MetadataCoreRestful | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | string | |
    | database | database | path | true | string | |
    | system | system | query | true | string | |
    | table | table | path | true | string | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | string | |
    | system | system | query | true | string | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | string | |
    | database | database | path | true | string | |
    | system | system | query | true | string | |
    | table | table | path | true | string | |
    | traverse | traverse | query | false | boolean | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | dataSourceId | dataSourceId | path | true | string | |
    | database | database | path | true | string | |
    | system | system | query | true | string | |
    | table | table | path | true | string | |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/http/udf-api/index.html b/docs/1.1.1/api/http/udf-api/index.html index 5726aade310..b6dbb09419f 100644 --- a/docs/1.1.1/api/http/udf-api/index.html +++ b/docs/1.1.1/api/http/udf-api/index.html @@ -7,7 +7,7 @@ UDF接口 | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    UDF API

    UDF handover#

    Basic information#

    Path: /api/rest_j/v1/udf/handover

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |
    | handoverUser | string | required | | user the UDF is handed over to | |

    UDF update#

    Basic information#

    Path: /api/rest_j/v1/udf/update

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfUpdateVo | object | required | | | |
    | ├─ id | number | required | | | |
    | ├─ udfName | string | required | | cannot be modified | |
    | ├─ udfType | number | required | | cannot be modified | |
    | ├─ description | string | required | | | |
    | ├─ path | string | required | | jar-type UDFs are uploaded via file path | |
    | ├─ useFormat | string | required | | | |
    | ├─ registerFormat | string | required | | | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |

    UDF shared-user list#

    Basic information#

    Path: /api/rest_j/v1/udf/getSharedUsers

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
    | sharedUsers | string [] | required | | | |

    item type: string

    ├─ optional

    UDF delete#

    Basic information#

    Path: /api/rest_j/v1/udf/delete/{id}

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Path parameters

    | Parameter name | Example | Remark |
    | id | 100 | udf id |

    Body

    | Name | Type | Required | Default | Remark | Other info |

    UDF add#

    Basic information#

    Path: /api/rest_j/v1/udf/add

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfAddVo | object | required | | | |
    | ├─ udfName | string | required | | | |
    | ├─ udfType | number | required | | | |
    | ├─ description | string | required | | | |
    | ├─ path | string | required | | jar-type UDFs are uploaded via file path | |
    | ├─ shared | boolean | optional | | no need to pass | |
    | ├─ useFormat | string | required | | | |
    | ├─ expire | boolean | optional | | no need to pass | |
    | ├─ load | boolean | required | | | |
    | ├─ registerFormat | string | required | | | |
    | ├─ treeId | number | optional | | no need to pass | |
    | ├─ sys | string | required | | system; currently always "IDE" | |
    | ├─ clusterName | string | required | | cluster; currently always "all" | |
    | ├─ directory | string | required | | first-level category directory of the user's personal functions | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
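    A minimal sketch of the add request (gateway address and all field values are illustrative; sys and clusterName use the currently fixed values from the table above):

        curl -s -b cookies.txt -X POST "http://127.0.0.1:9001/api/rest_j/v1/udf/add" \
          -H "Content-Type: application/json" \
          -d '{
            "udfAddVo": {
              "udfName": "my_udf",
              "udfType": 0,
              "description": "demo udf",
              "path": "file:///tmp/hadoop/my_udf.jar",
              "useFormat": "my_udf(col)",
              "registerFormat": "create temporary function my_udf as \"com.example.MyUdf\"",
              "load": true,
              "sys": "IDE",
              "clusterName": "all",
              "directory": "default"
            }
          }'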

    View UDF source#

    Basic information#

    Path: /api/rest_j/v1/udf/downloadUdf

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |
    | version | string | required | | | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
    | method | string | required | | | |
    | status | number | required | | | |
    | message | string | required | | | |
    | data | object | required | | | |
    | ├─ content | string | required | | UDF content | |

    UDF version publish#

    Basic information#

    Path: /api/rest_j/v1/udf/publish

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |
    | version | string | required | | version to publish, e.g. v000005 | |

    UDF share#

    Basic information#

    Path: /api/rest_j/v1/udf/shareUDF

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfInfo | object | required | | | |
    | ├─ id | number | required | | | |
    | ├─ udfName | string | required | | | |
    | ├─ udfType | number | required | | | |
    | sharedUsers | string [] | required | | list of users to share with | |

    item type: string

    ├─ optional

    UDF management page#

    Note: users can only see UDFs they created themselves.

    Basic information#

    Path: /api/rest_j/v1/udf/managerPages

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfName | string | optional | | | |
    | udfType | string | required | | comma-separated string, e.g. 0,1,2 | |
    | createUser | string | optional | | | |
    | curPage | number | required | | page number | |
    | pageSize | number | required | | records per page | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
    | infoList | object [] | required | | | |

    item type: object

    | ├─ id | number | required | | | |
    | ├─ createUser | string | required | | UDF creator | |
    | ├─ udfName | string | required | | | |
    | ├─ udfType | string | required | | | |
    | ├─ expire | boolean | required | | whether the UDF has expired | |
    | ├─ shared | boolean | required | | whether it is a shared UDF | |
    | ├─ treeId | number | required | | | |
    | ├─ sys | string | required | | system, e.g. dss | |
    | ├─ clusterName | string | required | | cluster, currently all | |
    | ├─ createTime | number | required | | | |
    | ├─ updateTime | number | required | | | |
    | ├─ path | string | required | | path of the user's last upload, for display | |
    | ├─ registerFormat | string | required | | | |
    | ├─ useFormat | string | required | | | |
    | ├─ description | string | required | | | |
    | ├─ operationStatus | object | required | | operation categories | |
    | ├─ canUpdate | boolean | required | | whether it can be edited | |
    | ├─ canShare | boolean | required | | whether it can be shared | |
    | ├─ canPublish | boolean | required | | whether it can be published | |
    | ├─ canDelete | boolean | required | | whether it can be deleted | |
    | ├─ canExpire | boolean | required | | whether it can be expired | |
    | ├─ canHandover | boolean | required | | whether it can be handed over | |
    | totalPage | number | required | | total pages | |
    | field_1 | string | required | | | |
    | total | number | required | | total count | |

    UDF expire#

    Basic information#

    Path: /api/rest_j/v1/udf/setExpire

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |

    Download UDF file to local#

    Basic information#

    Path: /api/rest_j/v1/udf/downloadToLocal

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |
    | version | string | required | | | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |

    View version list#

    Basic information#

    Path: /api/rest_j/v1/udf/versionList

    Method: GET

    Interface description:

    Request parameters#

    Query

    | Parameter name | Required | Example | Remark |
    | udfId | | 100 | |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
    | versionList | object [] | optional | | | |

    item type: object

    | ├─ id | number | optional | | | |
    | ├─ udfId | number | optional | | | |
    | ├─ path | string | optional | | | |
    | ├─ bmlResourceId | string | optional | | | |
    | ├─ bmlResourceVersion | string | optional | | | |
    | ├─ isPublished | boolean | optional | | | |
    | ├─ registerFormat | string | optional | | | |
    | ├─ useFormat | string | optional | | | |
    | ├─ description | string | optional | | | |
    | ├─ createTime | number | optional | | | |
    | ├─ expire | boolean | optional | | | |
    | ├─ createUser | string | optional | | | |

    Version rollback#

    Basic information#

    Path: /api/rest_j/v1/udf/rollback

    Method: POST

    Interface description:

    Request parameters#

    Headers

    | Parameter name | Parameter value | Required | Example | Remark |
    | Content-Type | application/json | | | |

    Body

    | Name | Type | Required | Default | Remark | Other info |
    | udfId | number | required | | | |
    | version | string | required | | version to roll back to | |

    Get UDF user list#

    Basic information#

    Path: /api/rest_j/v1/udf/allUdfUsers

    Method: GET

    Interface description:

    Request parameters#

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
    | udfUsers | string [] | required | | | |

    item type: string

    ├─ optional

    Get the first-level categories of the user's personal functions#

    Basic information#

    Path: /api/rest_j/v1/udf/userDirectory

    Method: GET

    Interface description:

    Request parameters#

    Query

    | Parameter name | Required | Example | Remark |
    | category | | udf | must be "udf" or "function", to get the first-level categories of UDF functions or of method functions respectively |

    Response data#

    | Name | Type | Required | Default | Remark | Other info |
    | userDirectory | string [] | required | | list of category names | |

    item type: string

    ├─ optional
- + \ No newline at end of file diff --git a/docs/1.1.1/api/jdbc_api/index.html b/docs/1.1.1/api/jdbc_api/index.html index 0b3f32f7759..d386f11e9f2 100644 --- a/docs/1.1.1/api/jdbc_api/index.html +++ b/docs/1.1.1/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@
// 3. Create statement and execute query
Statement st = connection.createStatement();
ResultSet rs = st.executeQuery("show tables");
// 4. Process the returned results of the database (using the ResultSet class)
while (rs.next()) {
    ResultSetMetaData metaData = rs.getMetaData();
    for (int i = 1; i <= metaData.getColumnCount(); i++) {
        System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + " ");
    }
    System.out.println();
}
// close resources
rs.close();
st.close();
connection.close();
}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/linkis_task_operator/index.html b/docs/1.1.1/api/linkis_task_operator/index.html index 71a9c0f8462..ba1a6659c6f 100644 --- a/docs/1.1.1/api/linkis_task_operator/index.html +++ b/docs/1.1.1/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    {  "method": "",  "status": 0,  "message": "",  "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit task#

    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    • Request Parameters

    {  "executionContent": {    "code": "show tables",    "runType": "sql"  },  "params": {    "variable": {// task variable       "testvar": "hello"     },    "configuration": {      "runtime": {// task runtime params         "jdbc.url": "XX"      },      "startup": { // ec start up params         "spark.executor.cores": "4"      }    }  },  "source": { //task source information    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "spark-2.4.3",    "userCreator": "hadoop-IDE"  }}

    • Sample Response

    {
      "method": "/api/rest_j/v1/entrance/submit",
      "status": 0,
      "message": "Request executed successfully",
      "data": {
        "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",
        "taskID": "123"
      }
    }
    • execID is the unique execution ID generated for the task after it is submitted to Linkis. It is of type String. This ID is only useful while the task is running, similar to the concept of a PID. The ExecID is designed as (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID that represents the task submitted by the user. This ID is generated by the database self-increment and is of Long type

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

    • The request parameter fromLine refers to the number of lines from which to get, and size refers to the number of lines of logs that this request gets

    • Sample Response, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress and resource#

    • Interface /api/rest_j/v1/entrance/${execID}/progressWithResource

    • Submission method GET

    • Sample Response

    {  "method": "/api/entrance/exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2/progressWithResource",  "status": 0,  "message": "OK",  "data": {    "yarnMetrics": {      "yarnResource": [        {          "queueMemory": 9663676416,          "queueCores": 6,          "queueInstances": 0,          "jobStatus": "COMPLETED",          "applicationId": "application_1655364300926_69504",          "queue": "default"        }      ],      "memoryPercent": 0.009,      "memoryRGB": "green",      "coreRGB": "green",      "corePercent": 0.02    },    "progress": 0.5,    "progressInfo": [      {        "succeedTasks": 4,        "failedTasks": 0,        "id": "jobId-1(linkis-spark-mix-code-1946915)",        "totalTasks": 6,        "runningTasks": 0      }    ],    "execID": "exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2"  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}

    6. Get task info#

    • Interface /api/rest_j/v1/jobhistory/{id}/get

    • Submission method GET

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | id | task id | path | true | string | |

    • Sample Response

    {
      "method": null,
      "status": 0,
      "message": "OK",
      "data": {
        "task": {
          "taskID": 1,
          "instance": "xxx",
          "execId": "exec-id-xxx",
          "umUser": "test",
          "engineInstance": "xxx",
          "progress": "10%",
          "logPath": "hdfs://xxx/xxx/xxx",
          "resultLocation": "hdfs://xxx/xxx/xxx",
          "status": "FAILED",
          "createdTime": "2019-01-01 00:00:00",
          "updatedTime": "2019-01-01 01:00:00",
          "engineType": "spark",
          "errorCode": 100,
          "errDesc": "Task Failed with error code 100",
          "executeApplicationName": "hello world",
          "requestApplicationName": "hello world",
          "runType": "xxx",
          "paramJson": "{\"xxx\":\"xxx\"}",
          "costTime": 10000,
          "strongerExecId": "execId-xxx",
          "sourceJson": "{\"xxx\":\"xxx\"}"
        }
      }
    }

    7. Get result set info#

    Support for multiple result sets

    • Interface /api/rest_j/v1/filesystem/getDirFileTrees

    • Submission method GET

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | path | result directory | query | true | string | |

    • Sample Response

    {
      "method": "/api/filesystem/getDirFileTrees",
      "status": 0,
      "message": "OK",
      "data": {
        "dirFileTrees": {
          "name": "1946923",
          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923",
          "properties": null,
          "children": [
            {
              "name": "_0.dolphin",
              "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",  // result set 1
              "properties": {
                "size": "7900",
                "modifytime": "1657113288360"
              },
              "children": null,
              "isLeaf": true,
              "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"
            },
            {
              "name": "_1.dolphin",
              "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_1.dolphin",  // result set 2
              "properties": {
                "size": "7900",
                "modifytime": "1657113288614"
              },
              "children": null,
              "isLeaf": true,
              "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"
            }
          ],
          "isLeaf": false,
          "parentPath": null
        }
      }
    }

    8. Get result content#

    • Interface /api/rest_j/v1/filesystem/openFile

    • Submission method GET

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | path | result path | query | true | string | |
    | charset | Charset | query | false | string | |
    | page | page number | query | false | ref | |
    | pageSize | page size | query | false | ref | |

    • Sample Response

    {
      "method": "/api/filesystem/openFile",
      "status": 0,
      "message": "OK",
      "data": {
        "metadata": [
          {
            "columnName": "count(1)",
            "comment": "NULL",
            "dataType": "long"
          }
        ],
        "totalPage": 0,
        "totalLine": 1,
        "page": 1,
        "type": "2",
        "fileContent": [
          [
            "28"
          ]
        ]
      }
    }

    9. Get Result by stream#

    Get the result as a CSV or Excel file

    • Interface /api/rest_j/v1/filesystem/resultsetToExcel

    • Submission method GET

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | autoFormat | Auto | query | false | boolean | |
    | charset | charset | query | false | string | |
    | csvSeerator | csv separator | query | false | string | |
    | limit | row limit | query | false | ref | |
    | nullValue | null value | query | false | string | |
    | outputFileName | Output file name | query | false | string | |
    | outputFileType | Output file type, csv or excel | query | false | string | |
    | path | result path | query | false | string | |
    | quoteRetouchEnable | Whether to quote modification | query | false | boolean | |
    | sheetName | sheet name | query | false | string | |
    • Response
    binary stream

    10. Compatible with 0.x task submission interface#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    • Request Parameters
    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {      "variable": {// task variable         "testvar": "hello"      },      "configuration": {        "runtime": {// task runtime params           "jdbc.url": "XX"        },        "startup": { // ec start up params           "spark.executor.cores": "4"        }      }    },    "source": { //task source information      "scriptPath": "file:///tmp/hadoop/test.sql"    },    "labels": {      "engineType": "spark-2.4.3",      "userCreator": "hadoop-IDE"    },    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Sample Response
    {  "method": "/api/rest_j/v1/entrance/execute",  "status": 0,  "message": "Request executed successfully",  "data": {    "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",    "taskID": "123"  }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/login_api/index.html b/docs/1.1.1/api/login_api/index.html index 6c39d449ea5..b6024bf71c7 100644 --- a/docs/1.1.1/api/login_api/index.html +++ b/docs/1.1.1/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # LDAP service configuration

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

    wds.linkis.test.mode=true    # Open test mode
    wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3.Log In Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: returns status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returns an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Response example

    {
      "method": null,
      "status": 0,
      "message": "login successful(登录成功)!",
      "data": {
        "isAdmin": false,
        "userName": ""
      }
    }

    Among them:

    • isAdmin: Linkis only has admin users and non-admin users. The only privilege of admin users is to view the historical tasks of all users in the Linkis management console.
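    A minimal login sketch that stores the session cookie for subsequent requests (gateway address and credentials are illustrative):

        curl -s -c cookies.txt -X POST "http://127.0.0.1:9001/api/rest_j/v1/user/login" \
          -H "Content-Type: application/json" \
          -d '{"userName": "hadoop", "password": "your-password"}'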

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Response example

    {
      "method": "/api/rest_j/v1/user/logout",
      "status": 0,
      "message": "Logout successful(退出登录成功)!"
    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Response example

    {
      "method": "/api/rest_j/v1/user/heartbeat",
      "status": 0,
      "message": "Maintain heartbeat success(维系心跳成功)!"
    }
    - + \ No newline at end of file diff --git a/docs/1.1.1/api/overview/index.html b/docs/1.1.1/api/overview/index.html index 367ee2dae58..83c2e20f71c 100644 --- a/docs/1.1.1/api/overview/index.html +++ b/docs/1.1.1/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Overview

    1. Document description#

    Linkis1.0 has been refactored and optimized on the basis of Linkis 0.x, and it is also compatible with the 0.x interface. However, in order to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When using Linkis1.0 for customized development, you need to use Linkis's authorization authentication interface. Please read Login API Document carefully.

    2. Linkis1.0 provides a JDBC interface. You need to use JDBC to access Linkis. Please read Task Submit and Execute JDBC API Document.

    3. Linkis1.0 provides the Rest interface. If you need to develop upper-level applications on the basis of Linkis, please read Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/add_an_engine_conn/index.html b/docs/1.1.1/architecture/add_an_engine_conn/index.html index 1028ee76f66..166a575c81f 100644 --- a/docs/1.1.1/architecture/add_an_engine_conn/index.html +++ b/docs/1.1.1/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Add an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computation governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager; then LinkisManager asks EngineConnManager to start an EngineConn based on the demands and label rules; finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding a EngineConn

    1. LinkisManager receives the request from the client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager , which can support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the client's new EngineConn request, it first checks the request parameters to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Label is used in AM's subsequent creation process to find the ECM and record resource information, you need to ensure the necessary Labels are present. At this stage, the request must carry the UserCreatorLabel (for example: hadoop-IDE) and the EngineTypeLabel (for example: spark-2.4.3).

    2. Select a EngineConnManager(ECM)#

    ECM selection uses the Label passed by the client to choose a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs using the Label passed by the client and returns them ordered by label matching degree. After obtaining the registered ECM list, these ECMs are filtered by rules; at this stage, rules such as availability checks, resource surplus, and machine load have been implemented. After filtering, the ECM with the best-matching label, the most idle resources, and the lowest load is returned.

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM will request the EngineConnPluginServer service to determine how many resources the client's engine creation request will use. Here, the resource request is encapsulated, mainly including the Label, the EngineConn startup parameters passed by the client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine tag through the passed Labels, and selects the EngineConnPlugin of the corresponding engine through the engine tag. Then it uses the EngineConnPlugin's resource generator to calculate, from the engine startup parameters passed in by the client, the resources required to apply for this new EngineConn, and returns the result to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that Linkis must implement when connecting a new computing storage engine. This interface mainly includes several capabilities that the EngineConn must provide during the startup process, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: A microservice that loads all the EngineConnPlugins and externally provides EngineConn resource generation and EngineConn startup command generation capabilities.
    • EngineConnResourceFactory: Calculates, from the parameters passed in, the total resources needed for this EngineConn startup.
    • EngineConnLaunchBuilder: Generates an EngineConn startup command from the incoming parameters, for the ECM to use when starting the engine.
    3. After AM obtains the engine resources, it will then call the RM service to apply for resources. The RM service will use the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. It first judges whether the resources of the user corresponding to the Labels are sufficient, and then judges whether the resources of the ECM service are sufficient. If the resources are sufficient, the resource application is approved, and the resources of the corresponding Labels are added or subtracted accordingly. A rough sketch of the resource calculation step follows.
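
    The following is a purely illustrative sketch of what the resource generation step computes for a Spark EngineConn; the class and parameter names are assumptions, not the actual EngineConnResourceFactory interfaces:

    // Illustrative only: derive the resource request for a new Spark EngineConn
    // from client startup parameters, in the spirit of EngineConnResourceFactory.
    public class SparkResourceRequestSketch {
        public static void main(String[] args) {
            int executorInstances = 2;        // e.g. spark.executor.instances
            long executorMemoryMb = 4096;     // e.g. spark.executor.memory
            long driverMemoryMb = 2048;       // e.g. spark.driver.memory

            long yarnMemoryMb = executorInstances * executorMemoryMb; // cluster-side resources
            long localJvmMemoryMb = driverMemoryMb;                   // ECM-side (driver) resources

            System.out.printf("apply: local %d MB, yarn %d MB over %d executors%n",
                    localJvmMemoryMb, yarnMemoryMb, executorInstances);
        }
    }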

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of the EngineConn.
    2. AM will then determine whether the EngineConn has started successfully and become available through the information reported by the EngineConn. If so, the result is returned, and the process of adding an engine ends.

    2. ECM starts EngineConn#

    Glossary:

    • EngineConnManager: The manager of EngineConn. It provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start-engine command passed by LinkisManager to ECM, which encapsulates all the tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, local environment variables required by ECM, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script from it.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Through the label information of the EngineConnBuildRequest, get the EngineConn type and corresponding version that actually needs to be started, get the EngineConnPlugin of that EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local machine, and checks whether the local environment variables required by the EngineConnLaunchRequest exist. After the verification passes, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix-like systems; that is, only Linux systems can execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the client side.

    After the startup script is executed, ECM monitors the execution status and execution log of the script in real time. Once the exit status is non-zero, it immediately reports EngineConn startup failure to LinkisManager and the entire process is complete; otherwise, it keeps monitoring the log and status of the startup script until the script execution is complete.
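
    A minimal sketch of that execute-and-monitor step, assuming a generated script path and a requesting user; this is not ECM's actual implementation:

    import java.io.BufferedReader;
    import java.io.InputStreamReader;

    public class StartupScriptSketch {
        public static void main(String[] args) throws Exception {
            String requestUser = "hadoop";                 // requesting user on the client side (assumed)
            String script = "/tmp/engineConnExec.sh";      // generated startup script (assumed path)
            // Switch to the requesting user so the EngineConn JVM runs as that user
            ProcessBuilder pb = new ProcessBuilder("sudo", "-u", requestUser, "bash", script);
            pb.redirectErrorStream(true);
            Process p = pb.start();
            try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
                String line;
                while ((line = r.readLine()) != null) {
                    System.out.println(line);              // stream the startup log in real time
                }
            }
            if (p.waitFor() != 0) {
                System.err.println("non-zero exit: report EngineConn startup failure to LinkisManager");
            }
        }
    }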

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: The actual execution unit of computing and storage logic in a real computing or storage scenario. It abstracts the various capabilities of EngineConn and provides multiple architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, use the command-line parameters of the Java main method to encapsulate an EngineCreationContext that contains the relevant label information, startup information, and parameter information, and initialize the EngineConn through the EngineCreationContext to establish the connection between the EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities to subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can submit and execute SQL, PySpark, and Scala code, supporting the client to submit and execute SQL, PySpark, Scala and other code to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly, and wait for the EngineConn to exit. When the underlying engine corresponding to the EngineConn is abnormal, or the maximum idle time is exceeded, or the Executor finishes execution, or the user manually kills it, the EngineConn automatically ends and exits. A condensed sketch of these stages follows this list.
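
    A condensed, purely illustrative sketch of the three stages; the type and method names below mirror the glossary but are assumptions, not the actual Linkis APIs:

    import java.util.Map;

    public class EngineConnInitSketch {
        record EngineCreationContext(Map<String, String> labels, Map<String, String> params) {}
        interface EngineConn { Object getSession(); }
        interface Executor { void execute(String code); }

        public static void main(String[] args) throws InterruptedException {
            // Stage 1: encapsulate main() args into an EngineCreationContext, then connect to the engine
            EngineCreationContext ctx = new EngineCreationContext(
                    Map.of("engineType", "spark-2.4.3"), Map.of("spark.master", "yarn"));
            EngineConn engineConn = () -> "sparkSession-placeholder"; // e.g. a SparkSession

            // Stage 2: initialize an Executor for the usage scenario (here: interactive)
            Executor executor = code -> System.out.println("submit to engine: " + code);
            executor.execute("show tables");

            // Stage 3: heartbeat to LinkisManager until an exit condition is met
            for (int i = 0; i < 1; i++) {       // sketch: one beat instead of an endless loop
                System.out.println("heartbeat -> LinkisManager, session=" + engineConn.getSession());
                Thread.sleep(1000);
            }
        }
    }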

    At this point, the process of adding a new EngineConn is basically complete. Finally, let's summarize:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the Labels, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and, after the application is approved, requires the ECM to start a new EngineConn as required.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing the BML materials, environment variables, local environment variables required by ECM, startup commands and other information needed to start an EngineConn; it then encapsulates the EngineConn startup script, and finally executes the script to start the EngineConn.
    • EngineConn initializes the EngineConn of the specific engine, then initializes the corresponding Executor according to the actual usage scenario to provide service capabilities to subsequent users, and finally reports the heartbeat to LinkisManager regularly while waiting for a normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/commons/message_scheduler/index.html b/docs/1.1.1/architecture/commons/message_scheduler/index.html index d818db190ef..56efa6dade8 100644 --- a/docs/1.1.1/architecture/commons/message_scheduler/index.html +++ b/docs/1.1.1/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Message Scheduler Module

    1 Overview#

            Linkis-RPC can realize communication between microservices. To simplify the use of RPC, Linkis provides the Message-Scheduler module, which analyzes, identifies, and calls methods annotated with @Receiver. It also unifies the use of RPC and Restful interfaces, giving better scalability.

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parses the object of the Service module, and encapsulates methods annotated with @Receiver into ServiceMethod objects.
    • ServiceRegistry: Registers the corresponding Service module, and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: Parses the object of the Implicit module; methods annotated with @Implicit are encapsulated into ImplicitMethod objects.
    • ImplicitRegistry: Registers the corresponding Implicit module, and stores the resolved ImplicitMethods in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches the related RequestProtocol.
    • Publisher: Implements the publishing and scheduling function: finds the ServiceMethod matching the RequestProtocol in the Registry, and encapsulates it as a Job for submission and scheduling.
    • Scheduler: The scheduling implementation, which uses Linkis-Scheduler to execute the job and returns a MessageJob object.
    • TxManager: Completes transaction management: performs transaction management on job execution, and decides whether to commit or roll back after job execution ends. A usage sketch of @Receiver follows this list.
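
    For illustration, a minimal sketch of how a service method is exposed through @Receiver; the import paths, the Sender parameter, and the request/response classes are assumptions based on the description above, not verified 1.1.1 APIs:

    import org.apache.linkis.rpc.Sender;
    import org.apache.linkis.rpc.message.annotation.Receiver;
    import org.springframework.stereotype.Component;

    @Component
    public class EchoService {
        // ServiceParser wraps this method into a ServiceMethod and registers it;
        // Publisher later matches an incoming EchoRequest protocol to this method.
        @Receiver
        public EchoResponse deal(EchoRequest request, Sender sender) {
            return new EchoResponse("echo: " + request.message());
        }

        public record EchoRequest(String message) {}
        public record EchoResponse(String message) {}
    }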
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/commons/rpc/index.html b/docs/1.1.1/architecture/commons/rpc/index.html index 83ae79ad9a2..ee3ffa2aafd 100644 --- a/docs/1.1.1/architecture/commons/rpc/index.html +++ b/docs/1.1.1/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: the service registration center, used for service management and service discovery.
    • Sender: the service request interface; the sender uses a Sender to request services from the receiver.
    • Receiver: the interface for receiving service requests; the receiver responds to services through this interface.
    • Interceptor: the Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts operations on the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: used for request encoding and decoding.
    • Feign: a lightweight framework for HTTP request calls and a declarative web service client, used for the underlying communication of Linkis-RPC.
    • Listener: the listener module, mainly used to listen for broadcast requests. A Sender usage sketch follows this list.
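
    A minimal sketch of requesting a service through Sender; getSender and ask reflect the Scala API from memory, so treat the exact signatures as assumptions:

    import org.apache.linkis.rpc.Sender;

    public class RpcCallSketch {
        record PingRequest(String message) {}     // hypothetical request protocol

        public static void main(String[] args) {
            // Obtain a Sender bound to the target microservice's name
            Sender sender = Sender.getSender("linkis-cg-linkismanager");
            // Synchronously ask and wait for the receiver's response
            Object response = sender.ask(new PingRequest("ping"));
            System.out.println(response);
        }
    }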
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html index 2c9a98c2983..cff69f3828d 100644 --- a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    EngineConn architecture design

    EngineConn: the engine connector, the actual connection unit between Linkis and the underlying computing storage engines. It maintains the session information with the actual engine, acting as a connection and a client rather than performing the calculations itself.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    The ability to provide interactive computing tasks.

    Core Class | Core Function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service | Core Function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as its type and the specific connection information with the underlying computing storage engine
    EngineExecution | Provides Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    linkis-engineconn-core engine connector core logic#

    Defines the interfaces involved in the core logic of EngineConn.

    Core Class | Core Function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core Class | Core Function
    EngineConnServer | EngineConn microservice startup class

    linkis-executor-core executor core logic#

    Defines the core classes related to the executor. The executor is the actual executor in real computing scenarios, responsible for submitting user code to EngineConn.

    Core Class | Core Function
    Executor | It is the actual computational logic execution unit and provides a top-level abstraction of the various capabilities of the engine.
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronization events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronization event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronization events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed. You can interact with it through RPC requests to get its status, load, concurrency and other basic Metrics data.

    Core Class | Core Function
    LogCache | Provides the log cache function
    AccessibleExecutor | The Executor that can be accessed; you can interact with it through RPC requests
    NodeHealthyInfoManager | Manages the Executor's health information
    NodeHeartbeatMsgManager | Manages the Executor's heartbeat information
    NodeOverLoadInfoManager | Manages the Executor's load information
    Listener | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides the start-stop and status acquisition functions of the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions of the Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 48bec32321b..0ef6f5bc5d5 100644 --- a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core Service | Core Function
    EngineConnLaunchService | Contains the core methods for generating an EngineConn and starting the process
    BmlResourceLocallizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService | Reports its own health heartbeat to AM regularly
    ECMMetricsService | Reports its own metrics status to AM regularly
    EngineConnKillSerivce | Provides related functions to stop the engine
    EngineConnListService | Provides engine caching and management functions
    EngineConnCallBackService | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 96d6ddf2ffc..81f39a5033d 100644 --- a/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class | Core Function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to request parameters, and has caching characteristics. The specific loading process mainly consists of two parts: 1) plug-in resources, such as the main program package and program dependency packages, are loaded locally (not open); 2) plug-in resources are dynamically loaded from the local machine into the service process environment, for example, loaded into the JVM through a class loader.

    Core Class | Core Function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads an engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service specially used to cache loaded engine connectors, and supports read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources; if changes are found, the plug-in is reloaded and the cache refreshed automatically.

    Core Class | Core Function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Engine connector that refreshes the cache regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. It contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory, and the implementation of the core interface of the engine connector plug-in.

    Core Class | Core Function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, including resources, commands, and instance construction methods
    EngineResourceFactory | Engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection is used to place the default engine connector plug-in library implemented against the plug-in interface defined by us. It provides the default engine connector implementations, such as jdbc, spark, python, and shell. Users can refer to the implemented cases and implement more engine connectors based on their own needs; a skeleton sketch follows the table below.

    Core Class | Core Function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell | shell engine connector
    engineplugin-spark | spark engine connector
    engineplugin-python | python engine connector
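
    As a skeleton only: the shape of a new engine connector plug-in. The method names below are recalled from the EngineConnPlugin contract and should be checked against the actual 1.1.1 sources (SparkEngineConnPlugin is the reference implementation):

    import java.util.List;
    import java.util.Map;

    // Skeleton of a custom plug-in; the methods below are assumptions, not verified signatures.
    public class MyEngineConnPlugin {
        private Map<String, Object> params;

        public void init(Map<String, Object> params) {
            this.params = params;                 // startup parameters passed by ECP
        }

        public Object getEngineResourceFactory() {
            return null;                          // computes resources needed at startup
        }

        public Object getEngineConnLaunchBuilder() {
            return null;                          // builds the EngineConn startup command
        }

        public Object getEngineConnFactory() {
            return null;                          // creates the EngineConn session object
        }

        public List<Object> getDefaultLabels() {
            return List.of();                     // e.g. an EngineTypeLabel such as myengine-1.0
        }
    }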
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/entrance/index.html b/docs/1.1.1/architecture/computation_governance_services/entrance/index.html index e0a380ecf4f..c3a90ccdf6c 100644 --- a/docs/1.1.1/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks, and it can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of the Linkis 0.X Entrance.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer, the computing task submission portal service, is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling them, applying for Executor execution, job status management, result set management, log management, and so on.

    Core Class | Core Function
    EntranceInterceptor | The Entrance interceptor is used to supplement the information of the incoming task, making the content of the task more complete. The supplementary information includes: database information supplement, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | The Entrance parser is used to parse the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or convert a Job into a storable Task.
    EntranceExecutorManager | The Entrance executor manager creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job.
    PersistenceManager | Persistence management is responsible for job-related persistence operations, such as storing the result set path, job status changes, progress, etc., in the database.
    ResultSetEngine | The result set engine is responsible for storing the result set after the job runs, saving it in the form of a file to HDFS or a local storage directory.
    LogManager | Log management is responsible for the storage of job logs and the management of log error codes.
    Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html index 9fa84447f58..3841711b123 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    img

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core Class | Core Function
    Action | Defines the attributes, methods, and parameters of a request
    Result | Defines the properties, methods, and parameters of a returned result
    UJESClient | Responsible for request submission and execution, and for obtaining status, results, and related parameters
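
    Based on the table above, a sketch of submitting a task through the SDK; the builder and config class names follow the Linkis client examples from memory, so verify them against the linkis-computation-client of your version:

    import org.apache.linkis.httpclient.dws.authentication.StaticAuthenticationStrategy;
    import org.apache.linkis.httpclient.dws.config.DWSClientConfig;
    import org.apache.linkis.httpclient.dws.config.DWSClientConfigBuilder;
    import org.apache.linkis.ujes.client.UJESClient;
    import org.apache.linkis.ujes.client.UJESClientImpl;
    import org.apache.linkis.ujes.client.request.JobExecuteAction;
    import org.apache.linkis.ujes.client.response.JobExecuteResult;

    public class SdkSubmitSketch {
        public static void main(String[] args) {
            // Configure the client against the Linkis gateway address (assumed host/token values)
            DWSClientConfig config = ((DWSClientConfigBuilder) DWSClientConfigBuilder.newBuilder()
                    .addServerUrl("http://127.0.0.1:9001")
                    .connectionTimeout(30000).readTimeout(30000)
                    .setAuthenticationStrategy(new StaticAuthenticationStrategy())
                    .setAuthTokenKey("hadoop").setAuthTokenValue("hadoop"))
                    .setDWSVersion("v1").build();
            UJESClient client = new UJESClientImpl(config);

            // Submit a SQL task; creator and user map to the userCreator label
            JobExecuteResult result = client.execute(JobExecuteAction.builder()
                    .setCreator("IDE")
                    .addExecuteCode("show tables")
                    .setEngineType(JobExecuteAction.EngineType.SPARK)
                    .setUser("hadoop").build());
            System.out.println(result.getTaskID());
        }
    }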
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common | Defines the instruction template parent class, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core | Responsible for parsing input, task execution, and defining output methods
    Application | Calls linkis-computation-client to perform tasks, and pulls logs and final results in real time
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 3d99b4ea10c..72ddaaefe60 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html index a415cf96bf9..c985984f2ad 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html index 898c9092855..ef2c218ef1e 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 89c13c1a98d..36bf86505f4 100644 --- a/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/computation_governance_services/overview/index.html b/docs/1.1.1/architecture/computation_governance_services/overview/index.html index 695eaef2770..7f0de0ab4ff 100644 --- a/docs/1.1.1/architecture/computation_governance_services/overview/index.html +++ b/docs/1.1.1/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html index 8938f5e39a2..45a6d71c0c0 100644 --- a/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.1.1/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html b/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html index 5fe9cba3942..80985bf26b2 100644 --- a/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.1.1/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It connects almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the submission of the user's computing task from the client and ending with the return of the final result, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, giving the ResourceManager full resource management capabilities across clusters and across computing resource types;
      2. AppManager: coordinates and manages all EngineConnManagers and EngineConns, handing over the life cycle of EngineConn application, reuse, creation, switching, and destruction to the AppManager for management;
      3. LabelManager: based on multi-level combined labels, provides label support for the routing and management capabilities of EngineConn and EngineConnManager across IDCs and clusters;
      4. EngineConnPluginServer: externally provides the resource generation capabilities required to start an EngineConn and the EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {
        "executionContent": {"code": "show tables", "runType": "sql"},
        "params": {"variable": {}, "configuration": {}},   // not required
        "source": {"scriptPath": "file:///1.hql"},         // not required, only used to record code source
        "labels": {
            "engineType": "spark-2.4.3",  // specify engine
            "userCreator": "username-IDE" // specify the submission user and submission system
        }
    }
    2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name as entrance and forwards the Job to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label will be selected for forwarding according to the routing label instead of random forwarding.
    3. After Entrance receives the Job request, it first briefly verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread keeps taking computing tasks from the consumption queue for consumption in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even for the same user, as long as the computing tasks are submitted by different systems, the actual consumption queues and consumer threads are completely different and fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out the computing task, it submits the task to Orchestrator, which officially enters the preparation phase.

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn for submitting and executing the subsequent computing task. The other is that Orchestrator orchestrates the computing task submitted by Entrance, converting the user's computing request into a physical execution tree and handing it over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How do we define a reusable EngineConn? It is one that matches all the label requirements of the computing task and whose health status is Healthy (the load is low and the actual status is Idle). All the EngineConns that meet the conditions are then sorted and selected according to the rules, and the best one is locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: completes the conversion of the JobReq (task request) submitted by the user into Orchestrator's ASTJob. This step performs parameter checking and information supplementation on the submitted computing task, such as variable replacement.
    • Parser: completes the analysis of the ASTJob, splitting it into an AST tree composed of ASTJob and ASTStage.
    • Validator: completes the inspection and information supplementation of ASTJob and ASTStage, such as code inspection and supplementation of necessary Label information.
    • Planner: converts the AST tree into a Logical tree. The Logical tree at this point is composed of LogicalTasks, which contain all the execution logic of the entire computing task.
    • Optimizer: converts the Logical tree into a Physical tree and optimizes it. A condensed sketch of this pipeline follows the list.
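
    Purely illustrative, the pipeline can be pictured as a chain of transformations; all types below are placeholders, not Orchestrator's actual classes:

    // Placeholder types standing in for JobReq, ASTJob, Logical and Physical trees.
    public class OrchestrationPipelineSketch {
        record JobReq(String code) {}
        record ASTJob(JobReq req) {}
        record LogicalTree(ASTJob ast) {}
        record PhysicalTree(LogicalTree logical) {}

        static ASTJob convertParseValidate(JobReq req) { return new ASTJob(req); }    // Converter + Parser + Validator
        static LogicalTree plan(ASTJob ast) { return new LogicalTree(ast); }          // Planner
        static PhysicalTree optimize(LogicalTree lt) { return new PhysicalTree(lt); } // Optimizer

        public static void main(String[] args) {
            PhysicalTree tree = optimize(plan(convertParseValidate(new JobReq("show tables"))));
            System.out.println("handed to Execution: " + tree);
        }
    }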

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    Different computing strategies have different execution logics encapsulated by JobExecTask and StageExecTask in the Physical tree; that is, the encapsulated execution logic depends on the specific type of computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask result needs to return. Once that result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of individual nodes: once a node is canceled or fails to execute, the entire Physical tree would be marked as failed. At this time, StageExecTask(End) is needed to ensure that the Physical tree can not only cancel the ExecTask that failed to execute, but also continue to upload the result set generated by the successful ExecTask and let the Physical tree continue to execute. This is the computing-strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to that of many SQL parsing engines (such as the SQL parsers of Spark and Hive). But in fact, the orchestration capability of Linkis Orchestrator is built for the computing governance field, serving users' different computing governance needs, while a SQL parsing engine performs parsing and orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves is the orchestration requirements that different computing tasks impose on computing strategies. For example, to be multi-active, for a calculation task submitted by the user, Orchestrator will, based on the "multi-active" computing strategy requirements, compile a Physical tree that submits this calculation task to multiple clusters for execution, fully considering the various possible abnormal scenarios while constructing the tree and reflecting them all in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated, while a SQL parsing engine only cares about the analysis and execution of SQL and is only responsible for parsing a piece of SQL into an executable Physical tree and computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL, supporting the splitting of a user SQL that spans multiple computing engines (which must be computing engines that Linkis has integrated with) into multiple sub-SQLs, submitting them to each corresponding engine during the execution phase, and finally selecting a suitable computing engine for summary calculation.

    After the analysis and orchestration of Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator will submit the Physical tree to its Execution module and enter the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, which are the last two capability phases provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: analyzes the dependencies of the Physical tree, and executes the nodes sequentially from the leaves according to the dependencies.
    • Reheater: once the execution of a node in the Physical tree is completed, it triggers a reheat. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example: if it is detected that a leaf node fails to execute and retry is supported (if the failure was caused by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added to the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn will report the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the computing task is written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set path through RPC; Execution consumes the event and broadcasts the obtained result set path through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the result set path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Listener that Entrance registered with the Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is completed.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls Entrance to obtain the status of the computing task.
    2. Once the status flips to success, it sends a request for job information to JobHistory and gets all the result set paths.
    3. It initiates a file content query request to PublicService through the result set paths, and obtains the content of the result sets. A polling sketch follows this list.
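
    A minimal polling sketch using java.net.http; the endpoint path follows the Linkis REST convention shown in the submission stage above, but treat the exact path and status strings as assumptions:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class StatusPollerSketch {
        public static void main(String[] args) throws Exception {
            HttpClient http = HttpClient.newHttpClient();
            String base = "http://linkis-gateway:9001/api/rest_j/v1"; // assumed gateway address
            String execId = args[0];                                  // returned by the submit request
            while (true) {
                HttpRequest req = HttpRequest.newBuilder(
                        URI.create(base + "/entrance/" + execId + "/status")).GET().build();
                String body = http.send(req, HttpResponse.BodyHandlers.ofString()).body();
                System.out.println(body);
                if (body.contains("\"Succeed\"") || body.contains("\"Failed\"")) break;
                Thread.sleep(2000);  // poll periodically
            }
            // On success: query JobHistory for the result set paths, then request the
            // file content from PublicService via those paths (steps 2 and 3 above).
        }
    }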

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html b/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html index dce5e647a1c..7b5b2d99144 100644 --- a/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.1.1/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/microservice_governance_services/overview/index.html b/docs/1.1.1/architecture/microservice_governance_services/overview/index.html index 8343ddd73cf..d92a212092a 100644 --- a/docs/1.1.1/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.1.1/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/overview/index.html b/docs/1.1.1/architecture/overview/index.html index 0118361e8c5..5603d013fd3 100644 --- a/docs/1.1.1/architecture/overview/index.html +++ b/docs/1.1.1/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services, and public services that Linkis 0.X has already provided.
    2. The microservice governance services are Spring Cloud Gateway, Eureka, and OpenFeign, already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0: from submission and preparation to execution, three stages in total, comprehensively upgrading Linkis's ability to perform control over user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of Linkis 1.0's architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. For Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. For Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. For Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/proxy_user/index.html b/docs/1.1.1/architecture/proxy_user/index.html index b2774e0f4fb..113f12eb74e 100644 --- a/docs/1.1.1/architecture/proxy_user/index.html +++ b/docs/1.1.1/architecture/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -18,7 +18,7 @@
    • The relevant Linkis interfaces need to be able to identify the proxy user information based on the original UserName obtained, use the proxy user to perform various operations, and record the audit log, including the user's task execution and download operations
    • When the task is submitted for execution, the entry service needs to modify the executing user to be the proxy user

    5 Things to Consider & Note#

    • Users are divided into proxy users and non-proxy users; users of the proxy type cannot proxy to other users again.
    • It is necessary to control the list of logged-in users and system users who can be proxied, to prohibit arbitrary proxying and avoid uncontrollable permissions. It is best to support configuration via database tables, so that changes take effect directly without restarting the service.
    • Separately record log files containing proxy user operations, such as proxy execution and function updates. All proxy user operations in PublicService are recorded in the log, which is convenient for auditing.
    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 59b924cd23e..ec4c3a64660 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ Analysis of engin BML | Apache Linkis - + @@ -17,7 +17,7 @@ taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

    3) The actual writing of material files into the material library is completed by the upload method in the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to a List<MultipartFile> files will be persisted to the material library's file storage system; the properties data of the material files is stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

    MultipartFile p = files[0];
    String resourceId = (String) properties.get("resourceId");
    String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
    fileName = resourceId;
    String path = resourceHelper.generatePath(user, fileName, properties);
    // generatePath currently supports Local and HDFS paths; the composition rules of paths are determined
    // by LocalResourceHelper or HdfsResourceHelper, which implement the generatePath method
    StringBuilder sb = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, sb, true);
    // The file size calculation and the writing of the file byte stream are implemented by the upload method
    // in LocalResourceHelper or HdfsResourceHelper
    Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
    // Insert a record into the resource table linkis_ps_bml_resources
    long id = resourceDao.uploadResource(resource);
    // Add a new record to the resource version table linkis_ps_bml_resources_version; the version number at this time is Constant.FIRST_VERSION.
    // In addition to the metadata of this version, the most important thing recorded is the storage location
    // of this version's file, including the file path, start location, and end location.
    String clientIp = (String) properties.get("clientIp");
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
            resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
    versionDao.insertNewVersion(resourceVersion);

    Only after the above process is successfully executed is the material data truly complete; the UploadResult is then returned to the client and the status of this ResourceTask is marked as completed. If an exception occurs, the status of this ResourceTask is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

    If the table linkis_cg_engine_conn_plugin_bml_resources matches the local material data, you need to use the data in EngineConnLocalizeResource to construct an EngineConnBmlResource object and update the metadata information, such as the version number, file size, and modification time, of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before updating, you need to complete the upload of the updated material file, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

    Inside the uploadToBml(localizeResource, resourceId) method, a material resource update request is made through a constructed bmlClient, namely:

    private val bmlClient = BmlClientFactory.createBmlClient()
    bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    Check the validity of the resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception is thrown to the client and the material update operation fails at the interface level.

    Therefore, the corresponding relationship of the resource data in the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources needs to be complete, otherwise an error will occur that the material file cannot be updated.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method obtains the maximum version number of the resourceId in the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material update will also fail, so the integrity of the corresponding relationship of the data here also needs to be strictly guaranteed.
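    A plausible form of the underlying version query (an assumption inferred from the version table layout, not code quoted from the project) is:

    select max(version) from linkis_ps_bml_resources_version where resource_id = #{resourceId}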

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

    ResourceTask resourceTask = null;
    synchronized (resourceId.intern()) {
        resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
    }

    Inside the createUpdateTask method, the main functions implemented are:

    // Generate a new version for the material resource
    String lastVersion = getResourceLastVersion(resourceId);
    String newVersion = generateNewVersion(lastVersion);
    // Then construct the ResourceTask and maintain its state
    ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
    // The upload logic of the material update is completed by the versionService.updateVersion method
    versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

    ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
    InputStream inputStream = file.getInputStream();
    // Get the path of the resource
    String newVersion = params.get("newVersion").toString();
    String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
    // The logic of getResourcePath is to take one record from the original path (limit 1) and then splice newVersion onto it with "_"
    // select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
    // Upload the resource to HDFS or local storage
    StringBuilder stringBuilder = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
    // Finally, insert a new resource version record into the linkis_ps_bml_resources_version table
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
    versionDao.insertNewVersion(resourceVersion);
diff --git a/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html b/docs/1.1.1/architecture/public_enhancement_services/bml/overview/index.html

    Overview | Apache Linkis

    ...the number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

    Database Design#

    1. Resource information table (resource)

    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
    resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time |
    is_share | Whether to share | 0 means not to share, 1 means to share
    update_time | Last update time of the resource |
    is_expire | Whether the record resource expires |
    expire_time | Record resource expiration time |
    2. Resource version information table (resource_version)

    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Joint primary key
    version | The version of the resource file |
    start_byte | Start byte of the resource file |
    end_byte | End byte of the resource file |
    size | Resource file size |
    resource_location | Resource file placement location |
    start_time | Record upload start time |
    end_time | Record upload end time |
    updater | Record update user |
    3. Resource download history table (resource_download_history)

    Field | Function | Remarks
    resource_id | Record the resource_id of the downloaded resource |
    version | Record the version of the downloaded resource |
    downloader | Record the user who downloaded |
    start_time | Record download start time |
    end_time | Record download end time |
    status | Whether the download succeeded | 0 means success, 1 means failure
    err_msg | Log the failure reason | null means success, otherwise logs the failure reason
diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service/index.html

    CS Architecture | Apache Linkis

diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_cache/index.html

    CS Cache Architecture | Apache Linkis

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface flows provided by ContextCacheService are similar, so they are not repeated here.

    KeyWord parsing logic#

    The concrete entity bean of a ContextValue needs to use the @KeywordMethod annotation on each get method that can serve as a keyword. For example, the getTableName method of Table must be annotated with @KeywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object annotated with @KeywordMethod, calls each one, and obtains the toString of the returned object; the result is parsed through user-selectable rules (separator-based and regular-expression-based) and stored in the keyword collection.
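    As a minimal self-contained sketch (the Table bean and its field are illustrative, and the annotation is re-declared here only so the example compiles on its own; the real annotation lives in the cs core module):

    import java.lang.annotation.ElementType;
    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;
    import java.lang.annotation.Target;

    // Stand-in declaration for the keyword annotation.
    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface KeywordMethod {}

    // Illustrative bean: getTableName is marked so that the toString of its
    // return value is parsed into the keyword collection.
    public class Table {
        private String tableName;

        @KeywordMethod
        public String getTableName() {
            return tableName;
        }
    }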

    Precautions:

    1. The annotation will be defined to the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword

diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_client/index.html

    CS Client Design | Apache Linkis

    The second case is that the content of the ContextID is carried. We need to parse the csid; the way of parsing is to obtain the information of each instance by string splitting, and then use Eureka to determine, through the instance information, whether this microservice still exists. If it exists, the request is sent to that microservice instance.

diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html

    CS HA Design | Apache Linkis

    The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, the Gateway determines that the main Instance is invalid and forwards the request to the standby Instance for processing. After the service on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.

diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_listener/index.html
    Version: 1.1.1

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We use the listener mode to achieve this, and use a heartbeat mechanism to poll so as to maintain the metadata consistency of the context information.

    Client self-registration, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the CSServer through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the client and its corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKeys values to all registered clients through the heartbeat mechanism. If a client's heartbeat times out, the client is removed.

    Listener UML class diagram#

    Interface: ListenerManager

    External: provides a ListenerBus for event delivery.

    Internal: provides a callback engine for specific event registration, access, update, and heartbeat processing logic.

    Listener callback engine sequence diagram#

diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_persistence/index.html

    CS Persistence Architecture | Apache Linkis
diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/context_service_search/index.html
    Version: 1.1.1

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

    3. Execution module: filters out the results that match the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. Query conditions can be combined into more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support the logical operations or, and, and not

      1. Unary operation UnaryContextSearchCondition:

    Support logical operations of a single parameter, such as NotContextSearchCondition

      2. Binary operation BinaryContextSearchCondition:

    Support the logical operation of two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

      3. Each logical operation corresponds to an implementation class of the above subclasses

    4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

    2. Support logical operations between Conditions that return new Conditions: And, Or and Not (considering the form condition1.or(condition2), the top-level Condition interface is required to define the logical operation methods; see the sketch after this list)

    3. Support building from a Map through the ContextSearchParser corresponding to each underlying implementation class
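    A minimal sketch of the combination style in point 2 (the interface and constructors below are illustrative stand-ins reusing the class names from the text, not the actual Linkis API):

    // A top-level Condition interface with default logical combination methods,
    // enabling the condition1.or(condition2) form described above.
    public interface Condition {
        default Condition or(Condition other) { return new OrContextSearchCondition(this, other); }
        default Condition and(Condition other) { return new AndContextSearchCondition(this, other); }
        default Condition not() { return new NotContextSearchCondition(this); }
    }

    // Minimal stand-ins for the implementation classes named in the text.
    class OrContextSearchCondition implements Condition {
        final Condition left, right;
        OrContextSearchCondition(Condition left, Condition right) { this.left = left; this.right = right; }
    }

    class AndContextSearchCondition implements Condition {
        final Condition left, right;
        AndContextSearchCondition(Condition left, Condition right) { this.left = left; this.right = right; }
    }

    class NotContextSearchCondition implements Condition {
        final Condition condition;
        NotContextSearchCondition(Condition condition) { this.condition = condition; }
    }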

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provides a search interface that receives a Map as a parameter and filters out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained
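    Put together, the three steps can be sketched as follows (the types are minimal stand-ins declared only to keep the sketch self-contained; they are not the actual Linkis API):

    import java.util.List;
    import java.util.Map;

    interface SearchCondition {}
    interface OptimizedCondition extends SearchCondition { List<Object> execute(); }
    interface ConditionParser { SearchCondition parse(Map<String, Object> conditionMap); }
    interface ConditionOptimizer { OptimizedCondition optimize(SearchCondition condition); }

    class ContextSearchSketch {
        private final ConditionParser parser;
        private final ConditionOptimizer optimizer;

        ContextSearchSketch(ConditionParser parser, ConditionOptimizer optimizer) {
            this.parser = parser;
            this.optimizer = optimizer;
        }

        List<Object> search(Map<String, Object> conditionMap) {
            SearchCondition condition = parser.parse(conditionMap);       // 1. Map -> Condition
            OptimizedCondition optimized = optimizer.optimize(condition); // 2. cost-based ordering
            return optimized.execute();                                   // 3. Ruler/Fetcher/Matcher execution
        }
    }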

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method optimizes the Condition based on its cost and converts it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
    2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutativity of the logical operations, the left and right order of a node's children in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
    3. For each tree, the cost is calculated from the leaf nodes and aggregated up to the root node, which gives the final cost of the tree; finally, the tree with the smallest cost is taken as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left child and 0.5 for the right child, and how to adjust this will be considered later. (The reason for assigning weights is that in some cases the condition on the left can already determine whether the entire combined logic matches, so the condition on the right does not always have to be executed, and its actual cost needs to be reduced by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost × Weight over all child nodes; the weight assignment logic is consistent with that of leaf nodes.

    Taking tree A and tree B as examples, the costs of these two trees are calculated separately, as shown in the figure below; the numbers in the nodes are Cost|Weight, assuming the costs of the five simple conditions A, B, C, D, and E are 10, 100, 50, 10, and 100. It can be concluded that the cost of tree B is less than that of tree A, so tree B is the better solution.
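    The weighting rule can be reproduced with a self-contained toy model (an illustration of the rule above, not Linkis code):

    // A leaf's Cost comes from CostCalculator; a non-leaf node's Cost is the
    // sum of childCost * childWeight, with the default weights 1.0 (left child)
    // and 0.5 (right child) described above.
    abstract class CostNode {
        abstract double cost();
    }

    class LeafCondition extends CostNode {
        private final double cost; // as computed by CostCalculator
        LeafCondition(double cost) { this.cost = cost; }
        @Override
        double cost() { return cost; }
    }

    class LogicalOp extends CostNode {
        private static final double LEFT_WEIGHT = 1.0;
        private static final double RIGHT_WEIGHT = 0.5;
        private final CostNode left;
        private final CostNode right;
        LogicalOp(CostNode left, CostNode right) { this.left = left; this.right = right; }
        @Override
        double cost() { return left.cost() * LEFT_WEIGHT + right.cost() * RIGHT_WEIGHT; }
    }

    Enumerating the left/right swaps allowed by the commutativity of the logical operations and evaluating cost() for each variant yields the cheapest tree.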

    3. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

        1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted as appropriate during implementation)

        2. Based on the efficiency of historical queries (throughput per unit time), the real-time Cost is obtained by continuously adjusting the initial Cost.

    - + \ No newline at end of file diff --git a/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html index f02ab8481e5..0330d6f32e8 100644 --- a/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.1.1/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

diff --git a/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html b/docs/1.1.1/architecture/public_enhancement_services/datasource_manager/index.html
    Version: 1.1.1

    Data Source Management Service Architecture

    Background#

    Earlier versions of both Exchangis 0.x and Linkis 0.x had integrated data source modules. In order to reuse the data source capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

    1) Service startup and deployment are managed uniformly by Linkis, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

    2) Provides graphical management through the Linkis Web console. The interface offers management functions such as creating a data source, data source query, data source update, connectivity testing and so on;

    3) The service is stateless and can be deployed with multiple instances for high availability. When the system is deployed, multiple instances can serve the outside world independently without interfering with each other; all information is stored in the database for sharing;

    4) Provides full life cycle management of data sources, including creation, query, update, testing, and expiration management;

    5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

    6) Functions provided by the Restful interface, in detail: data source type query, data source detailed information query, data source information query by version, data source version query, get data source parameter list, multi-dimensional data source search, data source environment query and update, add data source, data source parameter configuration, data source expiration setting, and data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

    1. The service is registered with Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can obtain the data source management service by connecting to Linkis-GateWay-Service with the service name data-source-manager.

    2. The interface layer serves other applications through the Restful interface, providing addition, deletion and modification of data sources and data source environments, data source link and dual-link tests, and data source version management and expiration operations;

    3. The Service layer mainly manages the database and the material library, permanently retaining the relevant information of the data sources;

    4. The link test of a data source is done through the linkis metastore server service, which currently supports the mysql/es/kafka/hive services.

    Core Process#

    1. To create a new data source, first the user is obtained from the request to determine whether the user is valid. The next step is to verify the relevant fields of the data source: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database and the data source ID number is returned.

    2. To update a data source, first the user is obtained from the request to determine whether the user is valid. The next step is to verify the relevant fields of the new data source: the data source name and data source type cannot be empty. Whether the data source exists is confirmed according to the data source ID number; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission for the data source: only the administrator or the owner of the data source has permission to update. If the user has permission, the data source is updated and the data source ID is returned.

    3. To update data source parameters, first the user is obtained from the request to determine whether the user is valid, and the detailed data source information is obtained according to the passed data source ID. It is then determined whether the user is the owner of the data source to be changed or an administrator. If so, the modified parameters are further verified; after the verification passes, the parameters are updated and the versionId is returned.
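    The permission rule in steps 2 and 3 can be summarized with the following self-contained sketch (the class and method names are illustrative):

    import java.util.Set;

    // Only an administrator or the owner (create_user) of a data source may update it.
    final class DataSourcePermission {
        private final Set<String> admins;

        DataSourcePermission(Set<String> admins) { this.admins = admins; }

        boolean canUpdate(String loginUser, String createUser) {
            return admins.contains(loginUser) || loginUser.equals(createUser);
        }
    }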

    Entity Object#

    Class Name | Description
    DataSourceType | Indicates the type of a data source
    DataSourceParamKeyDefinition | Declares data source property configuration definitions
    DataSource | Data source object entity class, including permission tags and attribute configuration definitions
    DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
    DataSourceParameter | Data source specific parameter configuration
    DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

    Table: linkis_ps_dm_datasource <--> Object: DataSource

    Serial Number | Column | Description
    1 | id | Data source ID
    2 | datasource_name | Data source name
    3 | datasource_desc | Data source detailed description
    4 | datasource_type_id | Data source type ID
    5 | create_identify | Create identify
    6 | create_system | System that created the data source
    7 | parameter | Data source parameters
    8 | create_time | Data source creation time
    9 | modify_time | Data source modification time
    10 | create_user | Data source create user
    11 | modify_user | Data source modify user
    12 | labels | Data source label
    13 | version_id | Data source version ID
    14 | expire | Whether the data source is out of date
    15 | published_version_id | Data source release version number

    Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

    Serial Number | Column | Description
    1 | id | Data source type ID
    2 | name | Data source type name
    3 | description | Data source type description
    4 | option | Type of data source
    5 | classifier | Data source type classifier
    6 | icon | Data source image display path
    7 | layers | Data source type hierarchy

    Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

    Serial Number | Column | Description
    1 | id | Data source environment ID
    2 | env_name | Data source environment name
    3 | env_desc | Data source environment description
    4 | datasource_type_id | Data source type ID
    5 | parameter | Data source environment parameters
    6 | create_time | Data source environment creation time
    7 | create_user | Data source environment create user
    8 | modify_time | Data source modification time
    9 | modify_user | Data source modify user

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column | Description
    1 | id | Key-value type ID
    2 | data_source_type_id | Data source type ID
    3 | key | Data source parameter key
    4 | name | Data source parameter name
    5 | default_value | Data source parameter default value
    6 | value_type | Data source parameter type
    7 | scope | Data source parameter scope
    8 | require | Whether the data source parameter is required
    9 | description | Data source parameter description
    10 | value_regex | Regex for the data source parameter
    11 | ref_id | Data source parameter association ID
    12 | ref_value | Data source parameter associated value
    13 | data_source | Data source
    14 | update_time | Update time
    15 | create_time | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column | Description
    1 | version_id | Data source version ID
    2 | datasource_id | Data source ID
    3 | parameter | The version parameters of the data source
    4 | comment | Comment
    5 | create_time | Create time
    6 | create_user | Create user
diff --git a/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html b/docs/1.1.1/architecture/public_enhancement_services/metadata_manager/index.html
    Version: 1.1.1

    Data Source Management Service Architecture

    Background#

    Earlier versions of both Exchangis 0.x and Linkis 0.x had integrated data source modules. In order to reuse the data source capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly covers the MetaData Manager Server metadata management service, which provides the following functions:

    1) Service startup and deployment are managed uniformly by Linkis, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

    2) The service is stateless and can be deployed with multiple instances for high availability. When the system is deployed, multiple instances can serve the outside world independently without interfering with each other; all information is stored in the database for sharing;

    3) Provides full life cycle management of data sources, including creation, query, update, testing, and expiration management;

    4) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

    5) Functions provided by the Restful interface, in detail: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1. The service is registered with Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can obtain the metadata management service by connecting to Linkis-GateWay-Service with the service name metamanager.

    2. The interface layer provides database/table/partition information query to other applications through the Restful interface;

    3. In the Service layer, the data source type is obtained from the data source management service through the data source ID, and the concrete supported service is obtained through the type. The currently supported services are mysql/es/kafka/hive;

    Core Process#

    1. The client passes in a specified data source ID and obtains information through the restful interface. For example, to query the database list for the data source with ID 1, the url is http://<meta-server-url>/metadatamanager/dbs/1;

    2、 According to the data source ID, access the data source service <data-source-manager> through RPC to obtain the data source type;

    3. According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned;

diff --git a/docs/1.1.1/architecture/public_enhancement_services/overview/index.html b/docs/1.1.1/architecture/public_enhancement_services/overview/index.html
    Version: 1.1.1

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store class libraries that need to be used when the engine runs.

    Core Class | Core Function
    UploadService | Provides resource upload service
    DownloadService | Provides resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provides high-availability capabilities for ContextService
    ListenerManager | Provides the ability of a message bus
    ContextSearch | Provides the query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with functions for querying historical linkis tasks and displaying their progress and logs, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provides historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides user-defined function service
diff --git a/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html b/docs/1.1.1/architecture/public_enhancement_services/public_service/index.html

    Public Service | Apache Linkis

    The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

    • Provides the label analysis module, which can parse the users' request into a bunch of labels.

    • Provides node label management capability, mainly the label CRUD capability for nodes and label resource management, which manages the resources of certain labels by recording the maximum, minimum, and used resources of a Label.

diff --git a/docs/1.1.1/deployment/cluster_deployment/index.html b/docs/1.1.1/deployment/cluster_deployment/index.html

    Cluster Deployment | Apache Linkis

    Linux clear process: sudo kill -9 {process number}

    4. Matters needing attention#

    4.1 It is best to start all services at the beginning, because there are dependencies between services. If some services do not exist and the corresponding backup cannot be found through Eureka, dependent services will fail to start. After a service fails to start, it will not restart automatically; wait until the alternative service is added, and then start the relevant services again#

diff --git a/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html b/docs/1.1.1/deployment/engine_conn_plugin_installation/index.html

    EngineConnPlugin Installation | Apache Linkis

    wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example, place it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis 1.0 management console is managed by engine label. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the Configuration, in the following tables:

    linkis_configuration_config_key: insert the keys and default values of the configuration parameters of the engine
    linkis_manager_label: insert the engine label, such as hive-1.2.1
    linkis_configuration_category: insert the catalog relationship of the engine
    linkis_configuration_config_value: insert the configuration that the engine needs to display

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it.

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without shutting down the server: just send a request to the linkis-engineconn-plugin-server service through the restful interface, using the actual deployment ip+port of the service. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is:

    {"method": "/enginePlugin/engineConn/refreshAll"}

    2. Restart refresh: the engine catalog can be forced to refresh by restarting

    ### cd to the sbin directory and restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute the linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh is successful: if you encounter problems during the refresh process and need to confirm whether the refresh succeeded, you can check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time when the refresh was triggered.

diff --git a/docs/1.1.1/deployment/installation_hierarchical_structure/index.html b/docs/1.1.1/deployment/installation_hierarchical_structure/index.html
    Version: 1.1.1

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of the 0.x version. Each microservice in 0.x has an independent root directory. The main advantage of this directory structure is that it makes it easy to distinguish microservices and manage them individually, but there are some obvious problems:

    1. The microservice catalog is too complicated and it is not convenient to switch catalog management
    2. There is no unified startup script, which makes it more troublesome to start and stop microservices
    3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
    4. There are a large number of repeated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we have greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, reducing the jar packages that are repeatedly dependent, and reusing configuration files and microservice management scripts as much as possible. Mainly reflected in the following aspects:

    1. The bin folder is no longer provided for each microservice; it is modified to be shared by all microservices.

    The bin folder becomes the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for Linkis as a whole, and provides independent start and stop for each microservice by changing parameters.

    2. No longer provide a separate conf directory for each microservice; it is modified to be shared by all microservices.

    The conf folder contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, the specific configuration of each microservice, which under normal circumstances users do not need to change.

    3. The lib folder is no longer provided for each microservice; it is modified to be shared by all microservices.

    The lib folder also contains two kinds of content: on the one hand, the common dependencies required by all microservices; on the other hand, the special dependencies required by each microservice.

    4. The log directory is no longer provided for each microservice; it is modified to be shared by all microservices.

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ── installation directory
    │ ├── checkEnv.sh ── Environment variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of Linkis installation
    ├── conf ── configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ── Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ── linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ── linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (Optional)
    │ └── token.properties (Optional)
    ├── db ── database DML and DDL file directory
    │ ├── linkis_ddl.sql ── Database table definition SQL
    │ ├── linkis_dml.sql ── Database table initialization SQL
    │ └── module ── Contains DML and DDL files of each microservice
    ├── lib ── lib directory
    │ ├── linkis-commons ── Common dependency package
    │ ├── linkis-computation-governance ── The lib directory of the computing governance module
    │ ├── linkis-engineconn-plugins ── lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ── lib directory of public enhancement services
    │ └── linkis-spring-cloud-services ── SpringCloud lib directory
    ├── logs ── log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ── Process IDs of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ── EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ── EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ── Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ── linkis manager microservice
    │ ├── linkis_mg-eureka.pid ── eureka microservice
    │ ├── linkis_mg-gateway.pid ── gateway microservice
    │ ├── linkis_ps-bml.pid ── material library microservice
    │ ├── linkis_ps-cs.pid ── Context microservice
    │ ├── linkis_ps-datasource.pid ── Data source microservice
    │ └── linkis_ps-publicservice.pid ── public microservice
    └── sbin ── microservice start and stop script directory
        ├── ext ── Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quick start, stop, and restart of a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally, you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ── engine management service
    ├── linkis-cg-engineplugin ── EngineConnPlugin management service
    ├── linkis-cg-entrance ── computing governance entrance service
    ├── linkis-cg-linkismanager ── computing governance management service
    ├── linkis-mg-eureka ── microservice registry service
    ├── linkis-mg-gateway ── Linkis gateway service
    ├── linkis-ps-bml ── material library service
    ├── linkis-ps-cs ── context service
    ├── linkis-ps-datasource ── data source service
    └── linkis-ps-publicservice ── public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, to start and stop a single microservice, you needed to enter the bin directory of each microservice and execute its start/stop script. With many microservices, starting and stopping was troublesome and added a lot of extra directory switching. Linkis 1.0 places all the scripts related to starting and stopping microservices in the sbin directory, and only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (the service name needs the linkis prefix removed, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
diff --git a/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html b/docs/1.1.1/deployment/involve_skywalking_into_linkis/index.html

    Involve SkyWalking into Linkis | Apache Linkis

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh

    4. Result display#

    The SkyWalking UI listens on port 8080 by default. After starting Linkis with SkyWalking enabled, open the UI; if you can see the picture below, the integration is successful.

diff --git a/docs/1.1.1/deployment/linkis_scriptis_install/index.html b/docs/1.1.1/deployment/linkis_scriptis_install/index.html

    Installation and deployment of tool scriptis | Apache Linkis

    After modifying the configuration, reload the nginx configuration

    sudo nginx -s reload

    Note the difference between root and alias in nginx

    • The result of root processing is: root path + location path
    • The result of alias processing is to replace the location path with the alias path
    • Alias is the definition of a directory alias, and root is the definition of the top-level directory

    4. Scriptis usage steps#

    4.1 Log in to the linkis management console normally#

    #http://10.10.10.10:8080/#/
    http://nginxIp:port/#/

    Because scriptis requires login verification, you need to log in first to get the cookie.

    4.2 Visit the scriptis page after successful login#

    #http://10.10.10.10:8080/scriptis/
    http://nginxIp:port/scriptis/

    nginxIp: the nginx server IP; port: the port number configured for the linkis management console in nginx; scriptis: the nginx location configured for the static files of the scriptis project (customizable).

    4.3 Use scriptis#

    Take creating an SQL query task as an example.

    step1 New script

    design sketch

    step2 Enter the statement to query

    design sketch

    step3 Run

    design sketch

    step4 View results

    design sketch

diff --git a/docs/1.1.1/deployment/quick_deploy/index.html b/docs/1.1.1/deployment/quick_deploy/index.html

    Quick Deployment | Apache Linkis

    ## : If your hive version is not 1.2.1, you need to modify the following parameter:
    #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh

    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    It is possible that a user repeatedly runs the install.sh script and clears all data in the databases. Therefore, each time install.sh is executed, the user will be asked whether they need to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Whether install successfully#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is licensed under GPL 2.0 and does not meet the license policy of the Apache open source agreement, starting from version 1.0.3 the official Apache release package does not include the mysql-connector-java-x.x.x.jar dependency by default; you need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar
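For example, fetch it directly on the server:

wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar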

    Copy the mysql driver package to the lib package path

cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/
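You can then verify that the jar is present in both directories, for example:

ls {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/ | grep mysql-connector
ls {LINKIS_HOME}/lib/linkis-commons/public-module/ | grep mysql-connector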

    5. Linkis quick startup#

    Notice that if you use DSS or other projects that rely on linkis version < 1.1.1, you also need to modify the ${LINKIS_HOME}/conf/linkis.properties file:

    echo "wds.linkis.session.ticket.key=bdp-user-ticket-id" >> linkis.properties

    (1). Start services

Run the following commands in the installation directory to start all services.

    sh sbin/linkis-start-all.sh

(2). Check whether the startup succeeded

You can check the startup status of the services on Eureka. Here is how to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303
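For a quick check without a browser, the registered service names can also be listed from the Eureka REST API (a sketch, assuming the default address above):

curl -s http://127.0.0.1:20303/eureka/apps | grep "<name>"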

As shown in the figure below, if all the following micro-services are registered in Eureka, it means that they've started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html index 879d9c2d26f..3bb0134a353 100644 --- a/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.1.1/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Source Code Directory Structure

Description of the Linkis source code directory structure. If you want to learn more about the Linkis modules, please check the Linkis related architecture design.

|-- assembly-combined-package //Compile the module of the entire project
|        |-- assembly-combined
|        |-- bin
|        |-- deploy-config
|        |-- src
|-- linkis-commons //Core abstraction, which contains all common modules
|        |-- linkis-common //Common module, built-in many common tools
|        |-- linkis-hadoop-common
|        |-- linkis-httpclient //Java SDK top-level interface
|        |-- linkis-message-scheduler
|        |-- linkis-module
|        |-- linkis-mybatis //SpringCloud's Mybatis module
|        |-- linkis-protocol
|        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
|        |-- linkis-scheduler //General scheduling module
|        |-- linkis-storage
|
|-- linkis-computation-governance //computing governance service
|        |-- linkis-client //Java SDK, users can directly access Linkis through Client
|        |-- linkis-computation-governance-common
|        |-- linkis-engineconn
|        |-- linkis-engineconn-manager
|        |-- linkis-entrance //General low-level entrance module
|        |-- linkis-entrance-client
|        |-- linkis-jdbc-driver
|        |-- linkis-manager
|
|-- linkis-engineconn-plugins
|        |-- engineconn-plugins
|        |-- linkis-engineconn-plugin-framework
|
|-- linkis-extensions
|        |-- linkis-io-file-client
|-- linkis-orchestrator
|        |-- linkis-code-orchestrator
|        |-- linkis-computation-orchestrator
|        |-- linkis-orchestrator-core
|        |-- plugin
|-- linkis-public-enhancements //Public enhancement services
|        |-- linkis-bml //Material library
|        |-- linkis-context-service //Unified context
|        |-- linkis-datasource //Data source service
|        |-- linkis-publicservice //Public Service
|-- linkis-spring-cloud-services //Microservice governance
|        |-- linkis-service-discovery
|        |-- linkis-service-gateway //Gateway
|-- db //Database information
|-- license-doc //license details
|        |-- license //The license of the background project
|        |-- ui-license //License of the linkis management console
|-- tool //Tool script
|        |-- check.sh
|        |-- dependencies
|
|-- web //Management console code of linkis
|
|-- scalastyle-config.xml //Scala code format check configuration file
|-- CONTRIBUTING.md
|-- CONTRIBUTING_CN.md
|-- DISCLAIMER-WIP
|-- LICENSE //LICENSE of the project source code
|-- LICENSE-binary //LICENSE of the binary package
|-- LICENSE-binary-ui //LICENSE of the front-end compiled package
|-- NOTICE //NOTICE of the project source code
|-- NOTICE-binary //NOTICE of the binary package
|-- NOTICE-binary-ui //NOTICE of the front-end binary package
|-- licenses-binary //The detailed dependent license files of the binary package
|-- licenses-binary-ui //The detailed license files that the front-end compiled package depends on
|-- README.md
|-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/start_metadatasource/index.html b/docs/1.1.1/deployment/start_metadatasource/index.html index 006fefc95b9..4fe9ed7e212 100644 --- a/docs/1.1.1/deployment/start_metadatasource/index.html +++ b/docs/1.1.1/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html b/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html index c718c2837e5..0ac6dd0b6ca 100644 --- a/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html +++ b/docs/1.1.1/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ installation package directory structure | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.1/deployment/web_install/index.html b/docs/1.1.1/deployment/web_install/index.html index 05baaa9d9b1..b34fe2b4c98 100644 --- a/docs/1.1.1/deployment/web_install/index.html +++ b/docs/1.1.1/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

2. Start the service: sudo systemctl restart nginx

3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m
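The directive sits inside the http block of nginx.conf; a sketch of the relevant part:

http {
    ...
    client_max_body_size 200m;
}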

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_compile_and_package/index.html b/docs/1.1.1/development/linkis_compile_and_package/index.html index ded237fa3fe..2351151e7f0 100644 --- a/docs/1.1.1/development/linkis_compile_and_package/index.html +++ b/docs/1.1.1/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
    <version>${hadoop.version}</version>
</dependency>

Modify hadoop-hdfs to:

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs-client</artifactId>
    <version>${hadoop.version}</version>
</dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
vim pom.xml

<properties>
    <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
</properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine
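As a sketch, rebuilding a single engine after the version change typically amounts to running maven in that engine's directory:

cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
mvn clean install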

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_config/index.html b/docs/1.1.1/development/linkis_config/index.html index 4a5fefe8c62..ed9c20d026a 100644 --- a/docs/1.1.1/development/linkis_config/index.html +++ b/docs/1.1.1/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_debug/index.html b/docs/1.1.1/development/linkis_debug/index.html index c5e4f3820e3..190fc137b6b 100644 --- a/docs/1.1.1/development/linkis_debug/index.html +++ b/docs/1.1.1/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -46,7 +46,7 @@ screenshot of enterprise wechat _16500167527083

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/linkis_debug_in_mac/index.html b/docs/1.1.1/development/linkis_debug_in_mac/index.html index c603dddef65..02435fd6e9c 100644 --- a/docs/1.1.1/development/linkis_debug_in_mac/index.html +++ b/docs/1.1.1/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

These two configurations mainly specify the root directory for engine storage. Pointing it at target/out means that after engine-related code or configuration changes, the engineplugin service can simply be restarted for the changes to take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command to start the engine process. The current user on the mac generally needs to enter a password when using sudo. Therefore, it is necessary to set sudo password-free for the current user. The setting method is as follows:

sudo chmod u-w /etc/sudoers
sudo visudo
# Replace "#%admin ALL=(ALL) ALL" with "%admin ALL=(ALL) NOPASSWD: ALL"
# Save the file and exit
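You can verify the change with a non-interactive sudo call:

sudo -n true && echo "passwordless sudo is enabled"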

    3.13 Service Testing#

    Make sure that the above services are all successfully started, and then test and submit the shell script job in postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

Execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/1.1.1/development/new_engine_conn/index.html b/docs/1.1.1/development/new_engine_conn/index.html index 646ac067a7f..220ffc57b0f 100644 --- a/docs/1.1.1/development/new_engine_conn/index.html +++ b/docs/1.1.1/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -53,7 +53,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

Also, when contributing to the community, please consider the license or copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of the new engine, as well as some additional engine configurations that need to be done. At present, the expansion process of a new engine is still relatively cumbersome, and it is hoped that the expansion and installation of the new engine can be optimized in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.1.1/development/web_build/index.html b/docs/1.1.1/development/web_build/index.html index 049d8ff8553..4e1d2abd0c4 100644 --- a/docs/1.1.1/development/web_build/index.html +++ b/docs/1.1.1/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

Note: Because the project is developed with the front and back ends separated, when running in a local browser, the browser needs to be set to allow cross-domain requests in order to access the back-end interface. For the specific setting, please refer to solving the chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/flink/index.html b/docs/1.1.1/engine_usage/flink/index.html index 8fc984a32b0..88b0617b715 100644 --- a/docs/1.1.1/engine_usage/flink/index.html +++ b/docs/1.1.1/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

Linkis 1.0 selects engines through tags, so we need to insert tag data into our database. The way of inserting is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

Preparation: queue setting#

    The Flink engine of Linkis 1.0 is started by flink on yarn, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

Prerequisite knowledge: two ways to use the Flink engine#

Linkis' Flink engine has two execution methods. One is the ComputationEngineConn method, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of the flink code; the other is the OnceEngineConn method, which is mainly used to start a streaming application in the Streamis production center.

Prerequisite knowledge: the Connector plug-ins of FlinkSQL#

FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the plug-in jar packages of these connectors into the lib of the flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine, as shown below.
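A sketch of the copy step, followed by the restart (the lib path below is illustrative; locate the flink engine's lib directory in your own deployment):

cp flink-connector-mysql-cdc-1.1.1.jar ${LINKIS_HOME}/lib/linkis-engineconn-plugins/flink/dist/v1.12.2/lib/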

cd ${LINKIS_HOME}/sbin
sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

A FlinkSQL writing example, taking binlog as an example:

CREATE TABLE mysql_binlog (
  id INT NOT NULL,
  name STRING,
  age INT
) WITH (
  'connector' = 'mysql-cdc',
  'hostname' = 'ip',
  'port' = 'port',
  'username' = 'username',
  'password' = 'password',
  'database-name' = 'dbname',
  'table-name' = 'tablename',
  'debezium.snapshot.locking.mode' = 'none' -- It is recommended to add this, otherwise the table will be locked
);

select * from mysql_binlog where id > 10;

When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the open-result-set interface to display it.

    3.2 Task submission via Linkis-cli#

After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

OnceEngineConn is used to formally start Flink streaming applications. Specifically, it calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. Other systems, such as Streamis, can make this call. Using the Client is also simple: first create a new maven project, or introduce the following dependency in your project.

<dependency>
    <groupId>com.webank.wedatasphere.linkis</groupId>
    <artifactId>linkis-computation-client</artifactId>
    <version>${linkis.version}</version>
</dependency>

Then create a new scala test file and click Execute to complete the analysis of data from one binlog source and insert it into another mysql database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and api version of linkis, such as:

wds.linkis.server.version=v1
wds.linkis.gateway.url=http://ip:9001/
object OnceJobTest {
  def main(args: Array[String]): Unit = {
    val sql = """CREATE TABLE mysql_binlog (
                | id INT NOT NULL,
                | name STRING,
                | age INT
                |) WITH (
                | 'connector' = 'mysql-cdc',
                | 'hostname' = 'ip',
                | 'port' = 'port',
                | 'username' = '${username}',
                | 'password' = '${password}',
                | 'database-name' = '${database}',
                | 'table-name' = '${tablename}',
                | 'debezium.snapshot.locking.mode' = 'none'
                |);
                |CREATE TABLE sink_table (
                | id INT NOT NULL,
                | name STRING,
                | age INT,
                | primary key(id) not enforced
                |) WITH (
                |  'connector' = 'jdbc',
                |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                |  'table-name' = '${tablename}',
                |  'driver' = 'com.mysql.jdbc.Driver',
                |  'username' = '${username}',
                |  'password' = '${password}'
                |);
                |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                |""".stripMargin
    val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
      .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
      .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
      .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
      .addStartupParam(Configuration.IS_TEST_MODE.key, true)
      //    .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
      .setMaxSubmitTime(300000)
      .addExecuteUser("hadoop")
      .addJobContent("runType", "sql")
      .addJobContent("code", sql)
      .addSource("jobName", "OnceJobTest")
      .build()
    onceJob.submit()
    println(onceJob.getId)
    onceJob.waitForCompleted()
    System.exit(0)
  }
}
    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/hive/index.html b/docs/1.1.1/engine_usage/hive/index.html index 14f0a816f06..df8d30b7491 100644 --- a/docs/1.1.1/engine_usage/hive/index.html +++ b/docs/1.1.1/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/jdbc/index.html b/docs/1.1.1/engine_usage/jdbc/index.html index 9176583c2ed..f3c15bd8afa 100644 --- a/docs/1.1.1/engine_usage/jdbc/index.html +++ b/docs/1.1.1/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

Map<String, Object> labels = new HashMap<String, Object>();
labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

The execution principle of JDBC is to load the JDBC Driver, submit the sql to the SQL server for execution, obtain the result set, and return it.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

JDBC user settings are mainly the JDBC connection information; it is recommended that users encrypt and manage the password and other sensitive information.
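As a sketch, the connection items usually configured for the JDBC EngineConn look like the following (parameter names should be verified against your Linkis version; values are illustrative):

wds.linkis.jdbc.connect.url=jdbc:mysql://127.0.0.1:3306/test
wds.linkis.jdbc.username=test_user
wds.linkis.jdbc.password=******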

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/openlookeng/index.html b/docs/1.1.1/engine_usage/openlookeng/index.html index 9cc80149ad1..158f45c87f2 100644 --- a/docs/1.1.1/engine_usage/openlookeng/index.html +++ b/docs/1.1.1/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ For the openlookeng task, you only need to modify the EngineConnType and CodeType parameters in the Demo:

Map<String, Object> labels = new HashMap<String, Object>();
labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "openlookeng-1.5.0"); // required engineType Label
labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, the cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of openlookeng is as follows:

    sh ./bin/linkis-cli -engineType openlookeng-1.5.0 -codeType sql -code 'show databases;' -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/overview/index.html b/docs/1.1.1/engine_usage/overview/index.html index 19e473da00f..27dd3a704d5 100644 --- a/docs/1.1.1/engine_usage/overview/index.html +++ b/docs/1.1.1/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

Engine | Whether to support Scriptis | Whether to support workflow
Spark | Support | Support
Hive | Support | Support
Presto | Support | Support
ElasticSearch | Support | Support
Python | Support | Support
Shell | Support | Support
JDBC | Support | Support
MySQL | Support | Support
Flink | Support | Support

    2. Document structure#

For the engines that have already been connected, you can refer to the following documents.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/pipeline/index.html b/docs/1.1.1/engine_usage/pipeline/index.html index 7b704768b3d..410675b144e 100644 --- a/docs/1.1.1/engine_usage/pipeline/index.html +++ b/docs/1.1.1/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ pipeline engine | Apache Linkis - + @@ -20,7 +20,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/python/index.html b/docs/1.1.1/engine_usage/python/index.html index 75ad26279e3..6b10acc8396 100644 --- a/docs/1.1.1/engine_usage/python/index.html +++ b/docs/1.1.1/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/shell/index.html b/docs/1.1.1/engine_usage/shell/index.html index 1d4bed530a7..9da5381ebb7 100644 --- a/docs/1.1.1/engine_usage/shell/index.html +++ b/docs/1.1.1/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

            Map<String, Object> labels = new HashMap<String, Object>();        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

For specific usage, please refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

The execution principle of the shell engine is that the shell EngineConn starts a system process via Java's built-in ProcessBuilder, redirects the output of the process to the EngineConn, and writes it to the log.

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.1.1/engine_usage/spark/index.html b/docs/1.1.1/engine_usage/spark/index.html index 46d9242183c..ab546262b09 100644 --- a/docs/1.1.1/engine_usage/spark/index.html +++ b/docs/1.1.1/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.1.1/introduction/index.html b/docs/1.1.1/introduction/index.html index af2fc0df8d0..08d95c62b33 100644 --- a/docs/1.1.1/introduction/index.html +++ b/docs/1.1.1/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.1.1/release/index.html b/docs/1.1.1/release/index.html index cff1eeacef3..4f1a0dcf3bf 100644 --- a/docs/1.1.1/release/index.html +++ b/docs/1.1.1/release/index.html @@ -7,7 +7,7 @@ Version overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Version overview

    Configuration Item#

Module Name (Service Name) | Type | Parameter Name | Default Value | Description
ec-openlookeng | New | linkis.openlookeng.engineconn.concurrent.limit | 100 | Concurrency limit
ec-openlookeng | New | linkis.openlookeng.http.connectTimeout | 60L | Client request timeout (HTTP requests based on OkHttp)
ec-openlookeng | New | linkis.openlookeng.http.readTimeout | 60L | Client read timeout (HTTP requests based on OkHttp)
ec-openlookeng | New | linkis.openlookeng.url | http://127.0.0.1:8080 | openlookeng service
ec-openlookeng | New | linkis.openlookeng.catalog | system | catalog
ec-openlookeng | New | linkis.openlookeng.schema | | schema
ec-openlookeng | New | linkis.openlookeng.source | global | source

    DB Table Changes#

For details, see the upgrade schema db/upgrade/1.1.1_schema file in the corresponding branch of the code repository (https://github.com/apache/incubator-linkis).

    - + \ No newline at end of file diff --git a/docs/1.1.1/table/udf-table/index.html b/docs/1.1.1/table/udf-table/index.html index 2d06760dc50..36a901407e8 100644 --- a/docs/1.1.1/table/udf-table/index.html +++ b/docs/1.1.1/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF table structure | Apache Linkis - + @@ -16,7 +16,7 @@ udf_type 3: custom function - python functionudf_type 4: custom function - scala function

    2 linkis_ps_udf_manager#

The administrator user table for UDF functions, with sharing permissions; only the UDF administrator's front end has a sharing entry.

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | user_name | | varchar(20) | | YES | |

3 linkis_ps_udf_shared_info#

    udf shared record table

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | |
3 | user_name | username used by the share | varchar(50) | | NO | |

4 linkis_ps_udf_tree#

    Tree-level record table for udf classification

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | parent | parent category | bigint(20) | | NO | |
3 | name | Class name of the function | varchar(100) | | YES | |
4 | user_name | username | varchar(50) | | NO | |
5 | description | description information | varchar(255) | | YES | |
6 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
7 | update_time | | timestamp | | NO | | CURRENT_TIMESTAMP
8 | category | category distinction udf / function | varchar(50) | | YES | |

5 linkis_ps_udf_user_load#

Configuration of whether a udf is loaded by default

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | udf_id | id of linkis_ps_udf_baseinfo | int(11) | | NO | |
3 | user_name | user owned | varchar(50) | | NO | |

6 linkis_ps_udf_version#

    udf version information table

number | name | description | type | key | empty | extra | default value
1 | id | | bigint(20) | PRI | NO | auto_increment |
2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | |
3 | path | The local path of the uploaded script/jar package | varchar(255) | | NO | |
4 | bml_resource_id | Material resource id in bml | varchar(50) | | NO | |
5 | bml_resource_version | bml material version | varchar(20) | | NO | |
6 | is_published | whether to publish | bit(1) | | YES | |
7 | register_format | registration format | varchar(255) | | YES | |
8 | use_format | use format | varchar(255) | | YES | |
9 | description | Version description | varchar(255) | | NO | |
10 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
11 | md5 | | varchar(100) | | YES | |

ER diagram#

    image

    - + \ No newline at end of file diff --git a/docs/1.1.1/tags/index.html b/docs/1.1.1/tags/index.html index 3482e4604ea..b1a5c68207e 100644 --- a/docs/1.1.1/tags/index.html +++ b/docs/1.1.1/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html b/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html index a878b50c5b8..48651cc74d9 100644 --- a/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.1.1/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Linkis1.0 Configurations

The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file linkis.properties is provided in the conf directory to avoid common configuration parameters having to be configured in multiple microservices at the same time. This document lists the parameters of Linkis1.0 module by module.

        Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many configuration parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

        The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.
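For example, a few common parameters can be set once in conf/linkis.properties and picked up by every microservice (values here are illustrative; the parameters themselves are listed in the tables below):

wds.linkis.encoding=utf-8
wds.linkis.test.mode=false
wds.linkis.home=/appcom/Install/LinkisInstall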

    1.1 Global configurations#

Parameter name | Default value | Description
wds.linkis.encoding | utf-8 | Linkis default encoding format
wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConn open remote debugging ports
wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout

    1.2 LDAP configurations#

Parameter name | Default value | Description
wds.linkis.ldap.proxy.url | None | LDAP URL address
wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

Parameter name | Default value | Description
wds.linkis.hadoop.root.user | hadoop | HDFS super user
wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
wds.linkis.keytab.enable | false | Whether to enable kerberos
wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
wds.linkis.keytab.host.enabled | false |
wds.linkis.keytab.host | 127.0.0.1 |
hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

Parameter name | Default value | Description
wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
wds.linkis.ms.rpc.sync.timeout | 60000 | Linkis RPC Receiver's default processing timeout
wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum refresh waiting time (recommended default value)
wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Sender consumption queue maximum buffer number

    2. Calculate governance configuration parameters#

    2.1 Entrance configuration parameters#

Parameter name | Default value | Description
wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version
wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version
wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version
wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version
wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version
wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version
wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job
wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
wds.linkis.default.requestApplication.name | IDE | The default submission system when the submission system is not specified
wds.linkis.default.runType | sql | The default script type when the script type is not specified
wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default
wds.linkis.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere,com.webank | Real-time INFO-level logs that are not output to the client by default
wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn
wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | When pushing Hive logs to the client, which logs are not filtered by default
wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | When pushing Spark logs to the client, which logs are not filtered by default
wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Shell default dangerous syntax
wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
wds.linkis.sql.default.limit | 5000 | SQL default maximum number of returned result set rows

    2.2 EngineConn configuration parameters#

Parameter name | Default value | Description
wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path of job result sets
wds.linkis.engine.resultSet.cache.max | 0k | Result sets smaller than this size are returned by EngineConn to Entrance directly, without being written to disk
wds.linkis.engine.default.limit | 5000 |
wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code to EngineConn before it is released
wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs that are ignored by default when the Engine pushes logs to the Entrance side
wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side
wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum free time

    2.3 EngineConnManager configuration parameters#

Parameter name | Default value | Description
wds.linkis.ecm.memory.max | 80g | Maximum memory ECM can use to start EngineConn
wds.linkis.ecm.cores.max | 50 | Maximum number of CPUs ECM can use to start EngineConn
wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConn that can be started; it is generally recommended to set this to the same value as wds.linkis.ecm.cores.max
wds.linkis.ecm.protected.memory | 4g | ECM protected memory, i.e. the memory ECM uses to start EngineConn cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory
wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPUs of ECM; the meaning is the same as wds.linkis.ecm.protected.memory
wds.linkis.ecm.protected.engine.instances | 2 | Number of protected instances of ECM
wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return the pid

    2.4 LinkisManager configuration parameters#

Parameter name | Default value | Description
wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum selection time for LinkisManager to reuse an existing EngineConn
wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum number of polling attempts for LinkisManager to reuse an existing EngineConn
wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types for which the user is not used as a reuse rule when LinkisManager reuses an existing EngineConn
wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine
wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue
wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's usage queue
wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications launched by each user in each engine's queue

    3. Each engine configuration parameter#

    3.1 JDBC engine configuration parameters#

Parameter name | Default value | Description
wds.linkis.jdbc.default.limit | 5000 | Default maximum number of returned result set rows
wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions

    3.2 Python engine configuration parameters#

Parameter name | Default value | Description
pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
python.path | None | Specify an additional path for Python, which only accepts shared storage paths

    3.3 Spark engine configuration parameters#

Parameter name | Default value | Description
wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for Scala and Python command interpreters
PYSPARK_DRIVER_PYTHON | python | Python command path
wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

Parameter name | Default value | Description
wds.linkis.bml.dws.version | v1 | Version number requested by Linkis Restful
wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on hdfs

    4.2 Metadata configuration parameters#

Parameter name | Default value | Description
hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
hive.meta.url | None | URL of the HiveMetaStore database; if hive.config.dir is not configured, this value must be configured
hive.meta.user | None | User of the HiveMetaStore database
hive.meta.password | None | Password of the HiveMetaStore database

    4.3 JobHistory configuration parameters#

Parameter name | Default value | Description
wds.linkis.jobhistory.admin | None | The default Admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

Parameter name | Default value | Description
wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory
wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | The first-level prefix after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the Client downloads a result set
wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file
wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file
wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying filesystem (if your HDFS or Linux machine performs poorly, it is recommended to increase this value appropriately)

    4.5 UDF configuration parameters#

Parameter name | Default value | Description
wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set it to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

Parameter name | Default value | Description
wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy user mode; if enabled, the login user's request will be proxied to the proxy user for execution
wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Proxy file refresh interval
wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable Token login mode; if enabled, access to Linkis in the form of tokens is allowed
wds.linkis.gateway.conf.token.auth.config | token.properties | Token rule storage file
wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Token file refresh interval
wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification
wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
wds.linkis.admin.user | hadoop | Administrator user list
wds.linkis.login_encrypt.enable | false | Whether the password is transmitted with RSA encryption when the user logs in
wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

From Version | Parameter name | Default value | Description
v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Specify the relative path of the service to be loaded
v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the service loading timeout; if it exceeds the specified time, it will not be loaded
v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the service to get the data source
v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | set kerberos principle for linkis-metadata hive service
v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | set user for linkis-metadata hive service
v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | set kerberos krb5 path for linkis-metadata hive service
v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | set tmp location for linkis-metadata hive and kafka service
v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | set driver for hive-metadata mysql service
v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | set url format for hive-metadata mysql service
v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | set timeout for mysql connection for hive-metadata mysql service
v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | set timeout for socket open for hive-metadata mysql service
    - + \ No newline at end of file diff --git a/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html b/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html index 10e34ef126e..1ff7fdbc440 100644 --- a/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html +++ b/docs/1.1.1/tuning_and_troubleshooting/error_guide/error_code/index.html @@ -7,7 +7,7 @@ Error Code | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.1

    Error Code

    Error code 01001#

    error description

    An error occurs during task execution:error code : 01001,error msg : Your task is not routed to the background ECM,Please contact the administrator

    The em of labels

    Reason 1

    Your task is not routed to the background ECM

    Solution 1

    1. Check whether the tenant label is correct

    2. If yes, contact the administrator


    Error code 01002#

    error description

    An error occurs during task execution:error code : 01002,error msg : The Linkis service load is too high. Contact the administrator to expand the capacity

    Unexpected end of file from server

    Reason 1

    The linkis service load is too high, resulting in abnormal service connection

    Solution 1

    Please contact the administrator


    Error code 01003#

    error description

    An error occurs during task execution:error code : 01003,error msg : The linkis service load is too high. Please contact the administrator for capacity expansion

    failed to ask linkis Manager Can be retried SocketTimeoutException

    Reason 1

The linkis service load is too high, resulting in a service connection timeout

    Solution 1

    Contact administrator


    Error code 01004#

    error description

    An error occurs during task execution:error code : 01004,error msg : The engine is killed at startup,Please contact the administrator

    [0-9]+ Killed

    Reason 1

    The engine was killed at startup because the machine load was too high

    Solution 1

    1. you can choose to retry
    2. or contact the administrator

    Error code 01005#

    error description

An error occurs during task execution:error code : 01005,error msg : Requesting Yarn to get the queue information still failed after 2 retries, please contact the administrator

Failed to request external resource ClassCastException

    Reason 1

Failed to request Yarn to obtain queue information. This is caused by a configuration problem

    Solution 1

    Please contact the administrator


    Error code 01101#

    error description

    An error occurs during task execution:error code : 01101,error msg : ECM resources are insufficient, please contact the administrator for capacity expansion

    ECM resources are insufficient

    Reason 1

    Due to insufficient server resources, possibly during peak hours

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01102#

    error description

    An error occurs during task execution:error code : 01102,error msg : ECM memory resources are insufficient. Please contact the administrator for capacity expansion

    ECM memory resources are insufficient

    Reason 1

    Insufficient server memory resources

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01103#

    error description

    An error occurs during task execution:error code : 01103,error msg : ECM CPU resources are insufficient. Please contact the administrator for capacity expansion

    ECM CPU resources are insufficient

    Reason 1

    Insufficient server CPU resources

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01104#

    error description

    An error occurs during task execution:error code : 01104,error msg : Instance resources are insufficient. Please contact the administrator for capacity expansion

    ECM Insufficient number of instances

    Reason 1

    Insufficient server instance resources

    Solution 1

    1. you can retry the task
    2. or contact the administrator

    Error code 01105#

    error description

    An error occurs during task execution:error code : 01105,error msg : The machine is out of memory. Please contact the administrator for capacity expansion

    Cannot allocate memory

    Reason 1

    Server machine out of memory

    Solution 1

    1. you can retry the task

    2. or contact the administrator


    Error code 12001#

    error description

An error occurs during task execution:error code : 12001,error msg : The queue CPU resource is insufficient. You can adjust the number of spark executors

    Queue CPU resources are insufficient

    Reason 1

    The queue CPU resource is insufficient, exceeding the limit you set

    Solution 1

    • Open the DSS platform and click management console -- parameter configuration -- ide -- spark -- display advanced settings -- Walker engine resource settings (2) -- adjust the number of concurrent executors [spark.executor.instances]

    • Or adjust the upper limit of queue resource usage on the management console -- parameter configuration -- global settings


    Error code 12002#

    error description

An error occurs during task execution:error code : 12002,error msg : The queue memory resources are insufficient. You can adjust the number of spark executors

    Insufficient queue memory

    Reason 1

    The queue memory resource is insufficient, which exceeds the queue memory resource value you set

    Solution 1

    • Open the DSS platform and click management console -- parameter configuration -- ide -- spark -- display advanced settings -- Walker engine resource settings (2) -- adjust the number of concurrent executors [spark.executor.instances]

    • Or adjust the upper limit of queue resource usage on the management console - parameter configuration - global settings


    Error code 12003#

    error description

    An error occurs during task execution:error code : 12003,error msg : The number of queue instances exceeds the limit

    Insufficient number of queue instances

    Reason 1

    The number of queue instances exceeds the limit

    Solution 1

    • Open the DSS platform and click management console - parameter configuration - global settings - queue resources - maximum number of yarn queue instances [wds.linkis.rm.yarnqueue.instance.max]

    Remarks

    Users are advised not to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters outside the global settings can be modified by users themselves


    Error code 12004#

    error description

    An error occurs during task execution:error code : 12004,error msg : Global driver memory usage limit reached, a lower driver memory can be set

    Driver memory resources are insufficient

    Reason 1

    Global driver memory exceeds the maximum

    Solution 1

    • Open the DSS platform and click management console - parameter configuration - global settings - queue resources - maximum number of yarn queue instances [wds.linkis.rm.yarnqueue.instance.max]

    Solution 2

    • If the queue is available and the number of application instances is too low, you can contact the administrator to adjust it

    Remarks

    Users are advised not to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters outside the global settings can be modified by users themselves


    Error code 12005#

    error description

    An error occurs during task execution:error code : 12005,error msg : The maximum number of global driver CPUs was exceeded, idle engines can be cleaned up

    Driver core resources are insufficient

    Reason 1

    The number of global driver CPUs exceeds the maximum

    Solution 1

    • Open the DSS platform and click management console - parameter configuration - global settings - queue resources - upper limit of queue CPU [wds.linkis.rm.yarnqueue.cores.max]

    Solution 2

    • Clean up idle engines

    Remarks

    Users are advised not to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters outside the global settings can be modified by users themselves


    Error code 12006#

    error description

    An error occurs during task execution:error code : 12006,error msg : If the maximum number of concurrent engines is exceeded, idle engines can be cleaned up

    Insufficient number of instances

    Reason 1

    Maximum engine concurrency exceeded

    Solution 1

    • Modify the configuration and global configuration: open the DSS platform and click management console - parameter configuration - global settings - queue resources - global maximum concurrency of each engine [wds.linkis.rm.instance]
    • spark engine
    • hive engine
    • python engine
    • pipeline engine

    Remarks

    Users are advised not to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters outside the global settings can be modified by users themselves


    Error code 12008#

    error description

    An error occurs during task execution:error code : 12008,error msg : Exception in getting the yarn queue information. It may be that the yarn queue you set does not exist

    Exception when getting the Yarn queue information

    Reason 1

    Exception when requesting the Yarn queue information

    Solution 1

    • If the cluster is normal and the user queue is configured incorrectly:
    • Linkis management console / parameter configuration > global settings > yarn queue name [wds.linkis.rm.yarnqueue]

    Solution 2

    • If the cluster is a new cluster, first check the cluster configuration of LinkisManager

      Hadoop cluster address: http://ip:8088/cluster

      yarn cluster address: http://ip:8888/cluster/scheduler
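    To confirm from the command line whether the queue actually exists, you can query the Yarn ResourceManager scheduler REST API directly. A minimal sketch, reusing the yarn cluster address above (ip and port are placeholders):

        # List the queue names known to the ResourceManager scheduler
        curl -s "http://ip:8888/ws/v1/cluster/scheduler" | grep -o '"queueName":"[^"]*"'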

    Remarks

    Users are advised not to modify the global settings at will. If necessary, please contact the operation and maintenance department before modifying. Parameters outside the global settings can be modified by users themselves


    Error code 12009#

    error description

    An error occurs during task execution:error code : 12009,error msg : Session creation failed. The %s queue does not exist. Please check whether the queue settings are correct

    queue (\S+) is not exists in YARN

    Reason 1

    The queue does not exist. Please check whether the queue settings are correct

    Solution 1

    • The user contacts the administrator to confirm whether the queue is correct

    Error code 12010#

    error description

    An error occurs during task execution:error code : 12010,error msg : The cluster queue memory resources are insufficient. You can contact people in the group to release resources

    Insufficient cluster queue memory

    Reason 1

    Insufficient cluster queue memory resources

    Solution 1

    • Check whether the resource memory is full. The user contacts the personnel in the group to release the resource, or applies for queue expansion

    Error code 12011#

    error description

    An error occurs during task execution:error code : 12011,error msg : Cluster queue CPU resources are insufficient. You can contact people in the group to release resources

    Insufficient cluster queue cpu

    Reason 1

    Insufficient cluster queue CPU resources

    Solution 1

    • Check whether the resource CPU is full. The user contacts the personnel in the group to release the resource, or applies for queue expansion

    Error code 12013#

    error description

    An error occurs during task execution:error code : 12013,error msg : Insufficient resources cause the engine to timeout. You can retry the task

    wait for DefaultEngineConn

    Reason 1

    Starting the engine timed out due to insufficient resources

    Solution 1

    The user retries the task. If it occurs repeatedly, please contact the administrator for troubleshooting


    Error code 12014#

    error description

    An error occurs during task execution:error code : 12014,error msg : The request engine timed out, which may be caused by insufficient queue resources. Please try again

    wait for engineConn initial timeout

    Reason 1

    Request engine timed out

    Solution 1

    The user retries the task. If it occurs repeatedly, please contact the administrator for troubleshooting


    Error code 13001#

    error description

    An error occurs during task execution:error code : 13001,error msg : Java process memory overflow, it is recommended to optimize the script content

    OutOfMemoryError

    Reason 1

    Java process memory overflow

    Solution 1

    • The user can try to increase the driver-side (Java) memory configuration. If the problem recurs, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Error code 13002#

    error description

    An error occurs during task execution:error code : 13002,error msg : The use of resources is too large. Please tune SQL or increase resources

    Container killed by YARN for exceeding memory limits

    Reason 1

    Resource usage is too large

    Solution 1

    • Increase the executor memory in the management console or in the task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13003#

    error description

    An error occurs during task execution:error code : 13003,error msg : The use of resources is too large. Please tune SQL or increase resources

    read record exception

    Reason 1

    Resource usage is too large

    Solution 1

    • After confirming with the administrator, the user can increase the executor memory in the management console or in the task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13004#

    error description

    An error occurs during task execution:error code : 13004,error msg : The engine exited unexpectedly, which may be caused by excessive use of resources

    failed because the engine quitted unexpectedly

    Reason 1

    Unexpected engine exit

    Solution 1

    Contact the administrator for troubleshooting


    Error code 13005#

    error description

    An error occurs during task execution:error code : 13005,error msg : Spark app exit may be caused by complex tasks

    Spark application has already stopped

    Reason 1

    Spark app exit may be caused by complex tasks

    Solution 1

    • The user can try to increase the driver-side (Java) memory configuration. If the problem recurs, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Solution 2

    • After confirming with the administrator, the user can increase the executor memory in the management console or in the task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13006#

    error description

    An error occurs during task execution:error code : 13006,error msg : Spark context exits, which may be caused by complex tasks

    Spark application has already stopped

    Reason 1

    Spark context exits, which may be caused by complex tasks

    Solution 1

    • The user can try to increase the driver-side (Java) memory configuration. If the problem recurs, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Solution 2

    • After confirming with the administrator, the user can increase the executor memory in the management console or in the task submission
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- worker resource settings (2) -- worker memory size [spark.executor.memory]

    Error code 13007#

    error description

    An error occurs during task execution:error code : 13007,error msg : Pyspark child process exited unexpectedly, which may be caused by complex tasks

    Pyspark process has stopped

    Reason 1

    Pyspark child process exited unexpectedly, which may be caused by complex tasks

    Solution 1

    • The user can try to increase the driver-side (Java) memory configuration. If the problem recurs, please contact the administrator for troubleshooting
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- spark -- spark engine resource settings (2) -- spark engine memory [spark.driver.memory]

    Error code 21001#

    error description

    An error occurs during task execution:error code : 21001,error msg : Session creation failed, user %s cannot submit application to queue: %s, please contact the person who provided the queue to you

    User (\S+) cannot submit applications to queue (\S+)

    Reason 1

    Session creation failed, user %s cannot submit application to queue

    Solution 1

    The user queue does not have permission. Please check whether the queue configuration is wrong or apply for queue permission


    Error code 21002#

    error description

    An error occurs during task execution:error code : 21002,error msg : Failed to create Python interpreter, please contact the administrator

    initialize python executor failed

    Reason 1

    Failed to create Python interpreter, please contact the administrator

    Solution 1

    Contact the operation and maintenance personnel for troubleshooting


    Error code 21003#

    error description

    An error occurs during task execution:error code : 21003,error msg : Failed to create stand-alone Python interpreter, please contact the administrator

    PythonSession process cannot be initialized

    Reason 1

    Failed to create Python interpreter, please contact the administrator

    Solution 1

    Contact the operation and maintenance personnel for troubleshooting


    Error code 22001#

    error description

    An error occurs during task execution:error code : 22001,error msg : %s has no permission to access. Please apply for permission to open the data table. Please contact your data management personnel

    Permission denied:\suser=[a-zA-Z0-9_]+,\saccess=[A-Z]+\s,\sinode="([a-zA-Z0-9/_.]+)

    Reason 1

    Unauthorized access

    Solution 1

    • Apply for permission on the database table

    Error code 22003#

    error description

    An error occurs during task execution:error code : 22003,error msg : The checked database table has no permission

    Authorization failed:No privilege

    Reason 1

    Unauthorized access

    Solution 1

    • Apply for permission on the database table

    Error code 22004#

    error description

    An error occurs during task execution:error code : 22004,error msg : The user %s does not exist on the machine. Please confirm whether you have applied for relevant permissions

    user (\S+) does not exist

    Reason 1

    Unauthorized access

    Solution 1

    • The user should submit a ticket to apply for the relevant permissions

    Error code 22005#

    error description

    An error occurs during task execution:error code : 22005,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    engineConnExec.sh: Permission denied

    Reason 1

    Unauthorized access

    Solution 1

    • The user should submit a ticket to apply for the relevant permissions

    Error code 22006#

    error description

    An error occurs during task execution:error code : 22006,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    at com.sun.security.auth.UnixPrincipal

    Reason 1

    Unauthorized access

    Solution 1

    • The user should submit a ticket to apply for the relevant permissions

    Error code 22007#

    error description

    An error occurs during task execution:error code : 22007,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    LoginException: java.lang.NullPointerException: invalid null input: name

    Reason 1

    Unauthorized access

    Solution 1

    • The user should submit a ticket to apply for the relevant permissions

    Error code 22008#

    error description

    An error occurs during task execution:error code : 22008,error msg : The user does not exist on the machine. Please confirm whether you have applied for relevant permissions

    User not known to the underlying authentication module

    Reason 1

    Unauthorized access

    Solution 1

    • The user should submit a ticket to apply for the relevant permissions

    Error code 30001#

    error description

    An error occurs during task execution:error code : 30001,error msg : The database exceeds its capacity limit

    is exceeded

    Reason 1

    The database exceeds its capacity limit

    Solution 1

    Users clean up data by themselves

    Solution 2

    Apply for database expansion


    Error code 31001#

    error description

    An error occurs during task execution:error code : 31001,error msg : User active kill task

    is killed by user

    Reason 1

    The user actively killed the task

    Solution 1

    • If it is confirmed that the user did not actively kill the task, please contact the operation and maintenance personnel for troubleshooting

    Error code 31002#

    error description

    An error occurs during task execution:error code : 31002,error msg : The engineTypeLabel you submitted does not have a corresponding engine version

    EngineConnPluginNotFoundException

    Reason 1

    The engineTypeLabel has no corresponding engine version

    Solution 1

    • The user checks whether the engineTypeLabel passed in is correct. If it is correct, please contact the operation and maintenance personnel for troubleshooting
    • How the operation and maintenance personnel can check: lib/linkis-engineconn-plugins/ on the Linkis ECP node is the local cache of all available engine plugins. The problem occurs because the corresponding engine version is missing there, or because the engine directory contains files in abnormal formats, such as .bak files or .zip archives that should not have been placed there
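    A quick way to perform the check described above is to list the plugin cache directory on the ECP node. A minimal sketch, assuming LINKIS_HOME points at your Linkis installation:

        # Each available engine plugin should appear here as a clean directory
        ls -l ${LINKIS_HOME}/lib/linkis-engineconn-plugins/
        # Stray .bak or .zip files in this directory can break plugin loading
        find ${LINKIS_HOME}/lib/linkis-engineconn-plugins/ -name "*.bak" -o -name "*.zip"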

    Error code 41001#

    error description

    An error occurs during task execution:error code : 41001,error msg : The database %s does not exist. Please check whether the referenced database is correct

    Database '([a-zA-Z_0-9]+)' not found

    Reason 1

    Database %s does not exist

    Solution 1

    • The user checks whether the database exists and whether they have permission on it
    • show databases


    Error code 41001#

    error description

    An error occurs during task execution:error code : 41001,error msg : The database %s does not exist. Please check whether the referenced database is correct

    Database does not exist: ([a-zA-Z_0-9]+)

    Reason 1

    Database %s does not exist

    Solution 1

    • The user checks whether the database exists and whether they have permission on it
    • show databases


    Error code 41003#

    error description

    An error occurs during task execution:error code : 41003,error msg : The field %s does not exist. Please check whether the referenced field is correct

    cannot resolve '(.+)' given input columns

    Reason 1

    Field %s does not exist

    Solution 1

    • The user checks whether the field exists

      desc table_name


    Error code 41003#

    error description

    An error occurs during task execution:error code : 41003,error msg : The field %s does not exist. Please check whether the referenced field is correct

    Column '(.+)' cannot be resolved

    Reason 1

    Field %s does not exist

    Solution 1

    • The user checks whether the field exists

      desc table_name


    Error code 41003#

    error description

    An error occurs during task execution:error code : 41003,error msg : The field %s does not exist. Please check whether the referenced field is correct

    Invalid table alias or column reference '(.+)':

    Reason 1

    Field %s does not exist

    Solution 1

    • The user checks whether the field exists

      desc table_name


    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : Partition field %s does not exist. Please check whether the referenced table is a partition table or the partition field is incorrect

    Partition spec {(\S+)} contains non-partition columns

    Reason 1

    Partition field %s does not exist

    Solution 1

    • The user checks whether the partition field is filled in correctly

    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : Partition field %s does not exist. Please check whether the referenced table is a partition table or the partition field is incorrect

    table is not partitioned but partition spec exists:{(.+)}

    Reason 1

    Partition field %s does not exist

    Solution 1

    • The user checks whether the partition field is filled in correctly

    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : The path corresponding to the table does not exist. Please contact your data manager

    Path does not exist: viewfs

    Reason 1

    Partition path does not exist

    Solution 1

    • Please try refresh table XXX, or kill the engine and run again. If the exception persists, please contact the data management personnel for troubleshooting

    Error code 41004#

    error description

    An error occurs during task execution:error code : 41004,error msg : Field %s does not exist, please check whether the referenced table %s is a partition table or the partition field is incorrect

    ([a-zA-Z_0-9]+) is not a valid partition column in table ([`.a-zA-Z_0-9]+)

    Reason 1

    Field %s does not exist

    Solution 1

    • The user checks whether the partition field is filled in correctly

    Error code 41005#

    error description

    An error occurs during task execution:error code : 41005,error msg : File %s does not exist

    Caused by:\s*java.io.FileNotFoundException

    Reason 1

    File %s does not exist

    Solution 1

    • Please try refresh table XXX, or kill the engine and run again. If the exception persists, please contact the data management personnel for troubleshooting

    Error code 42003#

    error description

    An error occurs during task execution:error code : 42003,error msg : Unknown function %s, please check whether the function referenced in the code is correct

    Undefined function: '(\S+)'

    Reason 1

    Error in referenced function

    Solution 1

    • If it is a UDF, please check the function. If it is a public function, please contact the operation and maintenance personnel for troubleshooting
    • udf address

    Error code 42003#

    error description

    An error occurs during task execution:error code : 42003,error msg : Unknown function %s, please check whether the function referenced in the code is correct

    Invalid function '(\S+)'

    Reason 1

    Error in referenced function

    Solution 1

    • If it is a UDF, please check the function. If it is a public function, please contact the operation and maintenance personnel for troubleshooting
    • udf address

    Error code 42004#

    error description

    An error occurs during task execution:error code : 42004,error msg : There is a name conflict in the field %s, please check whether there is a field with the same name in the sub query

    Ambiguous column Reference '(\S+)' in subquery

    Reason 1

    Name conflict in field %s

    Solution 1

    • User checks whether there is a duplicate name field

    Error code 42004#

    error description

    An error occurs during task execution:error code : 42004,error msg : There is a name conflict in the field %s, please check whether there is a field with the same name in the sub query

    Reference '(\S+)' is ambiguous

    Reason 1

    Name conflict in field %s

    Solution 1

    • User checks whether there is a duplicate name field

    Error code 42005#

    error description

    An error occurs during task execution:error code : 42005,error msg : The field %s must specify a table or subquery alias. Please check the source of the field

    Column '(\S+)' Found in more than One Tables/Subqueries

    Reason 1

    Field does not specify a table

    Solution 1

    • The user should add the field's source (table or subquery alias)

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : The table %s already exists in the database. Please delete the corresponding table and try again

    Table already exists

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : Table %s already exists in the database, please delete the corresponding table and try again

    AnalysisException: (\S+) already exists

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : Table %s already exists in the database, please delete the corresponding table and try again

    Table (\S+) already exists

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42006#

    error description

    An error occurs during task execution:error code : 42006,error msg : Table %s already exists in the database, please delete the corresponding table and try again

    Table or view '(\S+)' already exists in database '(\S+)'

    Reason 1

    Table %s already exists in the database

    Solution 1

    • The user needs to clean up the table and try again

    Error code 42007#

    error description

    An error occurs during task execution:error code : 42007,error msg : The number of fields in the inserted target table does not match, please check the code!

    requires that the data to be inserted have the same number of columns as the target table

    Reason 1

    The number of inserted fields does not match the target table

    Solution 1

    • The user checks the code

    Error code 42008#

    error description

    An error occurs during task execution:error code : 42008,error msg : Data type does not match, please check the code!

    due to data type mismatch: differing types in

    Reason 1

    data type mismatch

    Solution 1

    • The user checks the code

    Error code 42009#

    error description

    An error occurs during task execution:error code : 42009,error msg : The reference of field %s is incorrect. Please check whether the field exists!

    Invalid column reference (\S+)

    Reason 1

    Incorrect reference to field %s

    Solution 1

    • User checks whether the field exists

    Error code 42010#

    error description

    An error occurs during task execution:error code : 42010,error msg : Failed to extract data for field %s

    Can't extract value from (\S+): need

    Reason 1

    Failed to extract data for field %s

    Solution 1

    • Check whether the selected field is incorrect

    Error code 42012#

    error description

    An error occurs during task execution:error code : 42012,error msg : Group by position 2 is not in the select list, please check the code!

    GROUP BY position (\S+) is not in select list

    Reason 1

    The field of group by is not in the select list

    Solution 1

    • The user checks the code

    Error code 42014#

    error description

    An error occurs during task execution:error code : 42014,error msg : Insert data does not specify target table field %s, please check the code!

    Cannot insert into target table because column number/types are different '(\S+)'

    Reason 1

    The inserted data does not correspond to the fields of the target table

    Solution 1

    • The user checks the code

    Error code 42016#

    error description

    An error occurs during task execution:error code : 42016,error msg : UDF function does not specify a parameter, please check the code!

    UDFArgumentException Argument expected

    Reason 1

    UDF function does not specify full parameters

    Solution 1

    • The user checks the code

    Error code 42017#

    error description

    An error occurs during task execution:error code : 42017,error msg : Aggregate function %s cannot be written in group by, please check the code!

    aggregate functions are not allowed in GROUP BY

    Reason 1

    Aggregate function %s cannot be written in group by, please check the code!

    Solution 1

    • The user checks the code

    Error code 43007#

    error description

    An error occurs during task execution:error code : 43007,error msg : Pyspark execution failed, possibly due to syntax error or stage failure

    Py4JJavaError: An error occurred

    Reason 1

    Syntax error or stage failure

    Solution 1

    • If it is a syntax error, you need to check the code for modification
    • If the stage fails, you can choose to retry

    Error code 43011#

    error description

    An error occurs during task execution:error code : 43011,error msg : Export excel table exceeds the maximum limit of 1048575

    Invalid row number

    Reason 1

    Data volume exceeds the limit of a single sheet

    Solution 1

    • Reduce the amount of data to export, or export to CSV format

    Error code 43040#

    error description

    An error occurs during task execution:error code : 43040,error msg : Presto query must specify the data source and database information

    Schema must be specified when session schema is not set

    Reason 1

    Data source configuration error

    Solution 1

    • Check the Presto data source configuration in the management console
    • Modify the configuration: open the DSS platform and click management console -- parameter configuration -- ide -- presto -- data source configuration

    Error code 46001#

    error description

    An error occurs during task execution:error code : 46001,error msg : Import file address not found: %s

    java.io.FileNotFoundException: (\S+) (No such file or directory)

    Reason 1

    file does not exist

    Solution 1

    • Please check the workspace, or check whether the files in the HDFS directory exist
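    Whether the path exists on HDFS can be verified directly with the HDFS client. A minimal sketch, where /path/to/file is a placeholder for the address reported in the error:

        # Prints a listing if the file exists, or "No such file or directory" otherwise
        hdfs dfs -ls /path/to/file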

    Error code 46002#

    error description

    An error occurs during task execution:error code : 46002,error msg : Permission exception on the temporary file directory when exporting to Excel

    java.io.IOException: Permission denied(.+) at org.apache.poi.xssf.streaming.SXSSFWorkbook.createAndRegisterSXSSFSheet

    Reason 1

    The file directory is abnormal or file read/write permissions are insufficient

    Solution 1

    • Please confirm that the file has read-write permission. If there is any abnormality, please contact the operation and maintenance personnel for handling

    Error code 46003#

    error description

    An error occurs during task execution:error code : 46003,error msg : Unable to create directory while exporting file: %s

    java.io.IOException: Mkdirs failed to create (\S+) (.+)

    Reason 1

    Unable to create directory

    Solution 1

    • Contact the operation and maintenance personnel for troubleshooting

    Error code 46004#

    error description

    An error occurs during task execution:error code : 46004,error msg : Error importing module. The system does not have a %s module. Please contact the operation and maintenance personnel to install it

    ImportError: No module named (\S+)

    Reason 1

    The system does not have a %s module

    Solution 1

    • Contact the operation and maintenance personnel for troubleshooting
diff --git a/docs/1.1.1/tuning_and_troubleshooting/error_guide/interface/index.html b/docs/1.1.1/tuning_and_troubleshooting/error_guide/interface/index.html

        lready. , ip: bdpujes110003 ,port: 9101 ,serviceKind: linkis-cg-linkismanager

    Here ip and port are the corresponding service address, and serviceKind is the corresponding service name. If an RPC call fails, you can use this information to find the corresponding service
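    One way to follow up on such a failed RPC call is to grep the logs of the service named by serviceKind for the reported port. A minimal sketch, using the values from the log line above and assuming the default logs/ directory layout:

        # Locate the failing call in the target service's own log
        grep -n "9101" logs/linkis-computation-governance/linkis-cg-linkismanager/linkis.log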

diff --git a/docs/1.1.1/tuning_and_troubleshooting/overview/index.html b/docs/1.1.1/tuning_and_troubleshooting/overview/index.html

    Use the recommended OS version where possible, as some system versions have command incompatibilities. For example, poor yum compatibility on Ubuntu may cause yum-related errors during installation and deployment. It is also recommended to avoid Windows for deploying Linkis, as currently no script is fully compatible with .bat commands.

  • Missing configuration item: There are two configuration files that need to be modified in linkis1.0 version, linkis-env.sh and db.sh

    The former contains the environment parameters that Linkis needs to load during execution, and the latter holds the database connection information for the tables Linkis itself needs. Under normal circumstances, if a configuration item is missing, the error message will show an exception related to the key. For example, when db.sh does not fill in the relevant database configuration, an error such as unknown mysql server host '-P' will appear, which is caused by the missing host value.
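    For reference, a minimal db.sh sketch (all values are placeholders; the variable names follow the deploy configuration shipped with Linkis, but check your own copy):

        # Leaving MYSQL_HOST empty causes the "unknown mysql server host '-P'" error above
        MYSQL_HOST=127.0.0.1
        MYSQL_PORT=3306
        MYSQL_DB=linkis
        MYSQL_USER=root
        MYSQL_PASSWORD=your_password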

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port Occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, before starting you need to check whether the port of each microservice is occupied by another process. If it is occupied, you need to change the corresponding microservice port in the conf/linkis-env.sh file (see the sketch after this list)

    2. Necessary configuration parameters are missing: Some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the configuration related to wds.linkis.engineconn.* from conf/linkis.properties when it starts. If the user changes the Linkis path after installation and the configuration is not updated accordingly, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: It is recommended that users follow the recommended system and application versions in the official documents as much as possible when deploying and installing, and install the necessary system plug-ins, such as expect, yum, etc. If an application version is not compatible, it may cause application-related errors. For example, the incompatibility of SQL statements in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation. You need to refer to the "Q&A Problem Summary" or the deployment documentation to make the corresponding settings.
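    For the port-occupation case in particular, a minimal sketch of the usual check (9101 is just an example port; substitute the one from your conf/linkis-env.sh):

        # Find which process already holds the port before starting the microservice
        sudo netstat -tunlp | grep 9101
        # Then inspect the failing service's own log for details
        tail -n 200 logs/linkis-computation-governance/linkis-cg-linkismanager/linkis.log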

  • Report error during microservice execution period

    The situation of error reporting during the execution of microservices is more complicated, and the situations encountered are also different depending on the environment, but the troubleshooting methods are basically the same. Starting from the corresponding microservice error catalog, we can roughly divide it into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure or request engine failure due to insufficient resources: When this type of error occurs, it is not necessarily caused by insufficient resources, because the front end can only fetch logs produced after the Spring project has started; errors raised before the engine starts cannot be fetched well. Three kinds of high-frequency problems were found during internal testing:

      a. The engine cannot be created because there is no engine directory permission: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice. You need to enter the file to view the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: Since the engine directory has already been created, the log will be printed to the stdout file under the engine; the engine path can be found as described in case c

      c. Errors reported during engine execution: Each started engine is a microservice that is dynamically loaded and started during runtime. When the engine is started, if an error occurs, you need to find the corresponding log of the engine in the corresponding startup user directory. The corresponding root path is ENGINECONN_ROOT_PATH filled in linkis-env before installation. If you need to modify the path after installation, you need to modify wds.linkis.engineconn.root.dir in linkis.properties.
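    A minimal sketch for case c, assuming ENGINECONN_ROOT_PATH is the path configured before installation and hadoop is the startup user (both placeholders):

        # Engine instances are created per startup user; stdout holds errors raised before logging starts
        find ${ENGINECONN_ROOT_PATH}/hadoop -name "stdout" | xargs ls -lt | head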

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved through the above process during installation and deployment, you can send error messages in our community group. To make it easier for community members and developers to help, and to improve efficiency, when asking questions please describe the problem, attach the relevant log information, and mention what you have already checked. If you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis github homepage.

    Ⅵ. locate the source code by remote debug#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with document review it requires the user to have some understanding of the source code structure. It is recommended to check the detailed Linkis source code structure in the Linkis WIKI before remote debugging. After gaining a certain familiarity with the project's source code structure, you can refer to How to Debug Linkis.

diff --git a/docs/1.1.1/tuning_and_troubleshooting/tuning/index.html b/docs/1.1.1/tuning_and_troubleshooting/tuning/index.html

        override def getOrCreateGroup(groupName: String): Group = {
          if (!groupNameToGroups.containsKey(groupName)) synchronized {
            val initCapacity = 100
            val maxCapacity = 100
            // other codes...
          }
        }

    4. Resource settings related to task runtime#

    When submitting a task to run on Yarn, Yarn provides a configurable interface, and as a highly scalable framework, Linkis can likewise be configured to set these resource parameters.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust this configuration to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details (such as the Hive and Yarn configuration) users need to refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
diff --git a/docs/1.1.1/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.1.1/upgrade/upgrade_from_0.X_to_1.0_guide/index.html

        Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

        ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
        ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

        cd db/module
        ## Add the tables that the enginePlugin service depends on:
        source linkis_ecp.sql
        ## Add the table that the public service-instanceLabel service depends on
        source linkis_instance_label.sql
        ## Add the tables that the linkis-manager service depends on
        source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

        vim linkis_configuration_dml.sql
        ## Modify the default version of the corresponding engine
        SET @SPARK_LABEL="spark-2.4.3";
        SET @HIVE_LABEL="hive-1.2.1";
        ## Execute the initialization statement
        source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

diff --git a/docs/1.1.1/upgrade/upgrade_guide/index.html b/docs/1.1.1/upgrade/upgrade_guide/index.html

    Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

        #Example
        server {
            ......
            location dss/linkis {
                alias /appcom/Install/linkis-web-newversion/dist; # static file directory
                index index.html;
            }
            ......
        }

    Reload nginx configuration

    sudo nginx -s reload

    5.3 Notes#

    • After the management console is upgraded, the browser may serve cached assets; to verify the effect, it is best to clear the browser cache first
diff --git a/docs/1.1.1/user_guide/console_manual/index.html b/docs/1.1.1/user_guide/console_manual/index.html
    Version: 1.1.1

    Console User Manual

    Linkis 1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying user development and management efforts.

    1. Structure of Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrator of the Linkis computing management console can be configured through the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.

    ./media/image4.png

    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.

    ./media/image5.png

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

    The user can expand all the configuration information in the directory by clicking on the application type at the top, then selecting the engine type in the application, modifying the configuration information, and clicking "Save" to take effect.

    Edit catalog and new application types are only visible to the administrator. Click the edit button to delete an existing application and engine configuration (note: deleting an application directly deletes all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.

    ./media/image9.png

    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view ECM status information, modify ECM label information, modify ECM status, and query all engine information under each ECM. It is only visible to the administrator; how administrators are configured is described in Chapter 2 of this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface can view all microservice information under Linkis, and is only visible to the administrator. Linkis's own microservices can be viewed by clicking on the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

diff --git a/docs/1.1.1/user_guide/how_to_use/index.html b/docs/1.1.1/user_guide/how_to_use/index.html

    DSS Run Workflow

diff --git a/docs/1.1.1/user_guide/linkis-datasource-client/index.html b/docs/1.1.1/user_guide/linkis-datasource-client/index.html

        def testMetadataGetDatabases(client: LinkisMetaDataRemoteClient): Unit = {
          client.getDatabases(MetadataGetDatabasesAction.builder()
            .setUser("hadoop")
            .setDataSourceId(9l)
            .setUser("hadoop")
            .setSystem("client")
            .build()).getDbs
        }
diff --git a/docs/1.1.1/user_guide/linkiscli_manual/index.html b/docs/1.1.1/user_guide/linkiscli_manual/index.html

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction parameters > keys in instruction map-type parameters > user configuration > default configuration

    Example:

    Configure engine startup parameters:

        wds.linkis.client.param.conf.spark.executor.instances=3
        wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    Six, output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will output the result sets to files, creating one file per result set. The output file names follow this format:

        task-[taskId]-result-[idx].txt    

    E.g:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
diff --git a/docs/1.1.1/user_guide/overview/index.html b/docs/1.1.1/user_guide/overview/index.html
diff --git a/docs/1.1.1/user_guide/sdk_manual/index.html b/docs/1.1.1/user_guide/sdk_manual/index.html
    - + \ No newline at end of file diff --git a/docs/1.1.1/user_guide/udf/index.html b/docs/1.1.1/user_guide/udf/index.html index 14ed4eaad8d..b71ef284e55 100644 --- a/docs/1.1.1/user_guide/udf/index.html +++ b/docs/1.1.1/user_guide/udf/index.html @@ -7,7 +7,7 @@ Use of UDFs | Apache Linkis - + @@ -20,7 +20,7 @@ Prerequisite: The sharing function needs to be used by the user as an administrator, otherwise the front-end page will not provide an operation entry.

    Click the share button of udf: the content box will pop up, enter the list of users you want to share (comma separated).

    Note: After sharing to others, others need to actively load the UDF before using it.

    After sharing, the shared user can find it in "Shared Function", check the load and use it.

    5 Introduction of other functions#

    5.1 UDF handover#

    For example, when the user leaves the company, it may be necessary to hand over personal udf to others. Click the Handover button, select your handover object, and click OK.

    5.2 UDF Expiration#

    For a UDF shared with others, if it has been loaded by a user it was shared with, the UDF cannot be deleted directly; it can only be marked as expired. For the time being, this is only a marker and does not affect use.

    5.3 UDF version list#

    Click the "version list" button of a udf to view all versions of the udf. The following features are provided for each version:

    Create a new version: Copy the corresponding version to the latest version.

    Download: Download the udf file from bml to the local.

    View the source code: For the python/scala script type, you can directly view the source code, but the jar type is not supported.

    Publish: For a shared UDF, you can click to publish a certain version, so that that version takes effect for the users it is shared with. Note: shared users use the latest published version of the UDF, and individual users always use the latest version.

diff --git a/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html
    Version: Next(1.1.3)

    Engine Plugin Api

    EnginePluginRestful class

    refresh#

    Interface address:/api/rest_j/v1/engineplugin/refresh

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh a single resource

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    ecType         | type                  | query        | false    | string    |
    version        | version               | query        | false    | string    |

    Response Status:

    Status code | Description  | schema
    200         | OK           | Message
    401         | Unauthorized |
    403         | Forbidden    |
    404         | Not Found    |

    Response parameters:

    parameter name | parameter description | type           | schema
    data           | Dataset               | object         |
    message        | Description           | string         |
    method         | request url           | string         |
    status         | Status                | integer(int32) | integer(int32)

    Sample Response:

        {
            "data": {},
            "message": "",
            "method": "",
            "status": 0
        }

    refresh all#

    Interface address:/api/rest_j/v1/engineplugin/refreshAll

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh all ec resources

    Request Parameters:

    No

    Response Status:

    Status code | Description  | schema
    200         | OK           | Message
    401         | Unauthorized |
    403         | Forbidden    |
    404         | Not Found    |

    Response parameters:

    parameter name | parameter description | type           | schema
    data           | Dataset               | object         |
    message        | Description           | string         |
    method         | request url           | string         |
    status         | Status                | integer(int32) | integer(int32)

    Sample Response:

        {
            "data": {},
            "message": "",
            "method": "",
            "status": 0
        }
diff --git a/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html

    none

    Response parameters:

    parameter name | parameter description | type           | schema
    data           |                       | object         |
    message        |                       | string         |
    method         |                       | string         |
    status         |                       | integer(int32) | integer(int32)

    Sample Response:

        {
            "method": null,
            "status": 0,
            "message": "OK",
            "data": {
                "msg": "Refresh successfully"
            }
        }
diff --git a/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html b/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html
    Version: Next(1.1.3)

    Task Management

    EntranceMetricRestfulApi class

    Task management

    start task#

    Interface address:/api/rest_j/v1/entrance/api/metrics/runningtask

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Start task

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task info#

    Interface address:/api/rest_j/v1/entrance/api/metrics/taskinfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|creator|Creator|query|false|string||
|engineTypeLabel|Engine Type Label|query|false|string||
|user|user|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html
    Version: Next(1.1.3)

    Task Action

    EntranceRestfulApi class

    process task request#

    Interface address:/api/rest_j/v1/entrance/execute

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    The execute function handles the request submitted by the user to execute the task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|json|json|body|true|object||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Submit the execute function#

    Interface address:/api/rest_j/v1/entrance/submit

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Submit execute function

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|Submit|json|body|true|Submit|Submit|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    end task#

    Interface address: /api/rest_j/v1/entrance/{id}/kill

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    kill task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|id|ID|path|false|string||
|taskID|taskID|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    End Jobs#

    Interface address: /api/rest_j/v1/entrance/{id}/killJobs

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    End Jobs

    Request example:

    {    "taskIDList": [],    "idList": []}

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|id|id, taken from the request path|true|string|string||
|taskIDList|collection of task IDs|false|String|String||
|idList|ID collection|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/entrance/#id/killJobs",    "status": 0,    "message": "success",    "data": {        "messages": [{            "method": "",            "status": 0,            "message": "",            "data": {                "execID": ""            }        }]    }}

    task log#

    Interface address: /api/rest_j/v1/entrance/{id}/log

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get task log

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|id|Task ID|path|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Pause task#

    Interface address:/api/rest_j/v1/entrance/{id}/pause

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Pause task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|id|Task ID|path|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Task progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progress

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task progress

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|id|Task ID|path|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progressWithResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Resource progress

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|id|ID|path|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task status#

    Interface address:/api/rest_j/v1/entrance/{id}/status

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task status

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|id|ID|path|false|string||
|taskID|taskID|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html
    Version: Next(1.1.3)

    EC Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address:/api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|ticketid|ticketid|path|true|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|204|No Content||
|401|Unauthorized||
|403|Forbidden||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|ticketid|ticketid|query|true|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html
    Version: Next(1.1.3)

    ECM Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address:/api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|ticketid|ticketid|path|true|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|204|No Content||
|401|Unauthorized||
|403|Forbidden||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|ticketid|ticketid|query|true|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    ECM resource list#

    Interface address: /api/rest_j/v1/linkisManager/listAllEMs

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

Get a detailed list of all ECM resources; the list can be filtered by the query conditions below and returns everything by default

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|instance|instance name|query|false|string||
|nodeHealthy|Status, one of the enumeration types 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable'|query|false|string||
|owner|Creator|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/listAllEMs",    "status": 0,    "message": "OK",    "data": {        "EMs": [{            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "instance": "",                "serviceInstance": {                    "instance": "",                    "applicationName": ""                },                "serviceName": "",                "featureKey": "",                "empty":            }],            "applicationName": "",            "instance": ":",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": ,                "instance":            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "owner": "",            "runningTasks": null,            "pendingTasks": null,            "succeedTasks": null,            "failedTasks": null,            "maxMemory": ,            "usedMemory": ,            "systemCPUUsed": null,            "systemLeftMemory": ,            "nodeHealthy": "",            "msg": "",            "startTime":        }]    }}

Edit ECM instance#

    Interface address: /api/rest_j/v1/linkisManager/modifyEMInfo

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

Edit or modify an instance under ECM management

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|applicationName|Engine Label|false|String|String||
|emStatus|Instance status, one of the enumeration types 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable'|false|String|String||
|instance|Engine instance name|false|String|String||
|labelKey|The key of the added label, i.e. the key of each map in the labels collection|false|String|String||
|labels|The updated engine-instance label content; a collection of map entries|false|List|List||
|stringValue|The value of the added label, i.e. the value of each map in the labels collection|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/modifyEMInfo",    "status": 0,    "message": "success",    "data": {}}

    Open engine log#

    Interface address: /api/rest_j/v1/linkisManager/openEngineLog

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

Open the engine log; the stdout type engine log is opened by default

    Request example:

{    "applicationName": "",    "emInstance": "",    "instance": "",    "parameters": {        "pageSize": ,        "fromLine": ,        "logType": ""    }}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|applicationName|Engine Label|String|false|String||
|emInstance|Instance name|String|false|String||
|fromLine|From Line|String|false|String||
|instance|Engine instance name|String|false|String||
|logType|Log type, default stdout type, belonging to parameters|String|false|String||
|pageSize|Page Size|String|false|String||
|parameters|Pagination information|Map|false|Map||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/openEngineLog",    "status": 0,    "message": "OK",    "data": {        "result": {            "logPath": "",            "logs": [""],            "endLine": ,            "rows":        },        "isError": false,        "errorMsg": ""    }}
diff --git a/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html
(Engine Management | Apache Linkis)

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|applicationName|The application name; the outermost layer is an array, at the same level as the engineInstance parameter|false|String|String||
|engineInstance|The engine instance name; the outermost layer is an array, at the same level as the applicationName parameter|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html
    Version: Next(1.1.3)

    Resource Management

    RMMonitorRest class

    All user resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/allUserResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    All user resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|creator|creator|query|false|string||
|engineType|engineType|query|false|string||
|page|page|query|false|integer(int32)||
|size|size|query|false|integer(int32)||
|username|username|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "total": 34,        "resources": [{            "id": ,            "username": "",            "creator": "",            "engineTypeWithVersion": "",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": "instance": 0            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": null,            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "createTime": ,            "updateTime": ,            "loadResourceStatus": "",            "queueResourceStatus":        }]    }}

    Application List#

    Interface address: /api/rest_j/v1/linkisManager/rm/applicationlist

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get the list of application engines in resource management

    Request example:

{    "userCreator": ""}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|userCreator|userCreator|query|true|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": ,    "status": ,    "message": "",    "data": {        "applications": [{            "creator": "",            "applicationList": {                "usedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "maxResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "lockedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "minResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "engineInstances": [{                    "resource": {                        "resourceType": "",                        "maxResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "minResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "usedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "lockedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "expectedResource": null,                        "leftResource": {                            "memory": ,                            "cores": ,                            "instance":                        }                    },                    "engineType": "",                    "owner": "",                    "instance": "",                    "creator": "",                    "startTime": "",                    "status": "",                    "label": ""                }]            }        }]    }}

    EngineType#

    Interface address: /api/rest_j/v1/linkisManager/rm/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    Engine Type

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Engine manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/engines

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Engine Manager

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|param|param|body|false|object||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/queueresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue Manager

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|param|param|body|true|object||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue#

    Interface address: /api/rest_j/v1/linkisManager/rm/queues

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|param|param|body|false|object||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    reset resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/resetResource

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Reset resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|resourceId|resourceId|query|false|integer(int32)||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|204|No Content||
|401|Unauthorized||
|403|Forbidden||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource information#

    Interface address: /api/rest_j/v1/linkisManager/rm/userresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Query resource list and detailed resource data such as usage percentage

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|param|param|body|false|object||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {            "userResources": [{            "userCreator": "",            "engineTypes": [{            "engineType": "",            "percent": ""            }],    "percent": ""        }]    }}
diff --git a/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html
    Version: Next(1.1.3)

    Context History Service

    ContextHistoryRestfulApi class

    create history#

    Interface address:/api/rest_j/v1/contextservice/createHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Create History

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|contextHistory|History context|false|String|String||
|contextID|context id|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get multiple histories#

    Interface address:/api/rest_j/v1/contextservice/getHistories

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get multiple history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|contextID|context id|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get history#

    Interface address:/api/rest_j/v1/contextservice/getHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|contextID|ContextId|false|String|String||
|source|Context Source|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete history#

    Interface address:/api/rest_j/v1/contextservice/removeHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|contextHistory|History context|false|String|String||
|contextID|context id|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    search history#

    Interface address:/api/rest_j/v1/contextservice/searchHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Search history

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|contextID|ContextId|false|String|String||
|keywords|Keywords|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html
    Version: Next(1.1.3)

    Context Listening Service

    ContextListenerRestfulApi class

    Context listener service

    heartbeat#

    Interface address:/api/rest_j/v1/contextservice/heartbeat

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindIDListener#

    Interface address:/api/rest_j/v1/contextservice/onBindIDListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindKeyListener#

    Interface address:/api/rest_j/v1/contextservice/onBindKeyListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html
    Version: Next(1.1.3)

    Context Logging Service

    ContextIDRestfulApi class

    create text record#

    Interface address: /api/rest_j/v1/contextservice/createContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create text record

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextID|ContextId||false|String|String|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get text ID#

    Interface address: /api/rest_j/v1/contextservice/getContextID

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|contextId|ContextId|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete text ID#

    Interface address: /api/rest_j/v1/contextservice/removeContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextId|ContextId||false|String|String|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    reset text ID#

    Interface address: /api/rest_j/v1/contextservice/resetContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Reset Text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextId|ContextId||false|String|String|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Search text Id execution time#

    Interface address:/api/rest_j/v1/contextservice/searchContextIDByTime

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Search text ID execution time

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|accessTimeEnd|Access end time|query|false|string||
|accessTimeStart|Access start time|query|false|string||
|createTimeEnd|Create end time|query|false|string||
|createTimeStart|Create start time|query|false|string||
|pageNow|page number|query|false|string||
|pageSize|page size|query|false|string||
|updateTimeEnd|Update end time|query|false|string||
|updateTimeStart|Update start time|query|false|string||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Modify text ID#

    Interface address: /api/rest_j/v1/contextservice/updateContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Modify text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|---|---|---|---|---|---|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextId|ContextId||false|String|String|

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html b/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html
(Context API | Apache Linkis)

|contextKey|contextKey|false|String|String|

    Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html
    Version: Next(1.1.3)

    BM Project Operation Management

    BmlProjectRestful class

    Attachment resource item#

    Interface address:/api/rest_j/v1/bml/attachResourceAndProject

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Attachment resource item

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|---|---|---|---|---|---|
|projectName|project name|string|false|string||
|resourceid|resource name|string|false|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|dataset|object||
|message|description|string||
|method|request url|string||
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Create BML project#

    Interface address:/api/rest_j/v1/bml/createBmlProject

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Create BML project

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|---|---|---|---|---|---|
|accessusers|access users|string|false|string||
|editusers|edit user|string|false|string||
|projectName|project name|string|false|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|---|---|---|---|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download shared resources#

    Interface address:/api/rest_j/v1/bml/downloadShareResource

    Request mode:GET

    Request data type:application/x-www-form-urlencoded

    Response data type:*/*

    Interface description:

    Download shared resources

    Request parameters:

|Parameter name|parameter description|request type|must be|data type|schema|
|---|---|---|---|---|---|
|Resourceid|resource ID|query|false|string||
|Version|version|query|false|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|dataset|object||
|message|description|string||
|method|request url|string||
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Project information#

    Interface address:/api/rest_j/v1/bml/getProjectInfo

    Request mode:GET

    Request data type:application/x-www-form-urlencoded

    Response data type:*/*

    Interface description:

    Project information

    Request parameters:

|Parameter name|parameter description|request type|must be|data type|schema|
|---|---|---|---|---|---|
|ProjectName|project name|query|false|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|---|---|---|---|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update project user#

    Interface address:/api/rest_j/v1/bml/updateProjectUsers

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Update project users

    Request parameters:

|parameter name|parameter description|whether it is required|request type|data type|schema|
|---|---|---|---|---|---|
|accessusers|access users|false|string|string||
|editusers|edit user|false|string|string||
|projectName|project name|false|string|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|---|---|---|---|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update shared resources#

    Interface address:/api/rest_j/v1/bml/updateShareResource

    Request mode:POST

    Request data type:multipart/form-data

    Response data type:*/*

    Interface description:

    Update shared resources

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|---|---|---|---|---|---|
|file|file|formdata|false|ref||
|resourceid|resource ID|query|false|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|---|---|---|---|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Upload shared resources#

    Interface address:/api/rest_j/v1/bml/uploadShareResource

    Request mode:POST

    Request data type:application/json

    Response data type:*/*

    Interface description:

    Upload shared resources

    Request parameters:

|parameter name|parameter description|request type|must be|data type|schema|
|---|---|---|---|---|---|
|expireTime|expiration time|query|false|string||
|expiretype|expiration type|query|false|string||
|file|file set|formdata|false|ref||
|isexpire|whether it expires|query|false|string||
|maxversion|max version|query|false|ref||
|projectName|project name|query|false|string||
|resourceheader|resource header|query|false|string||
|system|system|query|false|string||

Response status:

|Status code|description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|Parameter name|parameter description|type|schema|
|---|---|---|---|
|Data|dataset|object||
|Message|description|string||
|Method|request url|string||
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html
    Version: Next(1.1.3)

    BML Resource Management

    BmlRestfulApi class

    update owner#

    Interface address:/api/rest_j/v1/bml/changeOwner

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update owner

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|newOwner|New owner|false|String|String||
|oldOwner|Old owner|false|String|String||
|resourceId|ResourceId|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Copy resources to other users#

    Interface address:/api/rest_j/v1/bml/copyResourceToAnotherUser

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Copy resources to specified user

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|anotherUser|specified user|false|String|String||
|resourceId|ResourceId|false|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete resource#

    Interface address:/api/rest_j/v1/bml/deleteResource

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete resource

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|---|---|---|---|---|---|
|resourceId|ResourceId|true|String|String||

Response Status:

|Status code|Description|schema|
|---|---|---|
|200|OK|Message|
|201|Created||
|401|Unauthorized||
|403|Forbidden||
|404|Not Found||

Response parameters:

|parameter name|parameter description|type|schema|
|---|---|---|---|
|data|Dataset|object||
|message|Description|string||
|method|request url|string||
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete multiple resources#

    Interface address:/api/rest_j/v1/bml/deleteResources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete multiple resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceIds | Collection of resource IDs, to delete multiple resources | true | List | List

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete version#

    Interface address:/api/rest_j/v1/bml/deleteVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete version

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceId | ResourceId | true | String | String
version | version | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download resources#

    Interface address:/api/rest_j/v1/bml/download

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Download the resource specified by the two parameters resourceId and version

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceId | ResourceId | query | false | string
version | Resource version; if not specified, defaults to latest | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get Basic#

    Interface address:/api/rest_j/v1/bml/getBasic

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Basic

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceId | ResourceId | query | true | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get resource information#

    Interface address:/api/rest_j/v1/bml/getResourceInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get resource information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceId | ResourceId | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Get resource list#

    Interface address:/api/rest_j/v1/bml/getResources

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the paginated resource list

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
currentPage | page number | query | false | string
pageSize | page size | query | false | string
system | system | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get version information#

    Interface address: /api/rest_j/v1/bml/getVersions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get bml version information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
currentPage | page number | query | false | string
pageSize | page size | query | false | string
resourceId | Resource ID | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rollback version#

    Interface address:/api/rest_j/v1/bml/rollbackVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rollback version

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
resourceId | ResourceId | false | String | String
version | Rollback version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    update resource#

    Interface address:/api/rest_j/v1/bml/updateVersion

    Request method: POST

    Request data type: multipart/form-data

    Response data type: */*

    Interface description:

Update a resource file via HTTP upload

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
file | file | formData | true | ref
resourceId | resourceId | query | true | string

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload resources#

    Interface address:/api/rest_j/v1/bml/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
file | file | formData | true | array | file
expireTime | expireTime | query | false | string
expireType | expireType | query | false | string
isExpire | isExpire | query | false | string
maxVersion | maxVersion | query | false | integer(int32)
resourceHeader | resourceHeader | query | false | string
system | system | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html
index f0fa3f2986a..1a9424b3cb4 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html
@@ -7,7 +7,7 @@ BMLFS Management | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    BMLFS Management

    BMLFsRestfulApi class

    Open ScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    openScriptFromBML

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
fileName | File name | query | true | string
creator | Creator | query | false | string
projectName | Project name | query | false | string
resourceId | ResourceId | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

product-openScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/product/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    /product/openScriptFromBML

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
fileName | File name | query | true | string
creator | Creator | query | false | string
resourceId | ResourceId | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Save script to BML#

    Interface address:/api/rest_j/v1/filesystem/saveScriptToBML

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Save script to BML

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
creator | Creator | true | String | String
fileName | File name | true | String | String
metadata | metadata | true | String | String
projectName | Project Name | true | String | String
resourceId | Resource ID | true | String | String
scriptContent | Content | true | String | String
SaveScriptToBML | json body | true | SaveScriptToBML | SaveScriptToBML

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html
index e5d5a8aea34..44d87a106d2 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html
@@ -7,7 +7,7 @@ Generic Api | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

Generic Api

    CommonRestfulApi class

    offline#

    Interface address:/api/rest_j/v1/offline

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Offline

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html
index 2704578a531..230b2b77e81 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html
@@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis
@@ -20,7 +20,7 @@ Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | integer(int64)

Response parameters:

parameter name | parameter description | type | schema
data | | object
message | | string
method | | string
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | integer(int64)

Response parameters:

parameter name | parameter description | type | schema
data | | object
message | | string
method | | string
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | integer(int64)
version | version | path | true | integer(int64)

Response parameters:

parameter name | parameter description | type | schema
data | | object
message | | string
method | | string
status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSource | dataSource | body | true | DataSource | DataSource
  connectParams | false | object
  createIdentify | false | string
  createSystem | false | string
  createTime | false | string(date-time)
  createUser | false | string
  dataSourceDesc | false | string
  dataSourceEnv | false | DataSourceEnv | DataSourceEnv
    connectParams | false | object
    createTime | false | string
    createUser | false | string
    dataSourceType | false | DataSourceType | DataSourceType
      classifier | false | string
      description | false | string
      icon | false | string
      id | false | string
      layers | false | integer
      name | false | string
      option | false | string
    dataSourceTypeId | false | integer
    envDesc | false | string
    envName | false | string
    id | false | integer
    modifyTime | false | string
    modifyUser | false | string
  dataSourceEnvId | false | integer(int64)
  dataSourceName | false | string
  dataSourceType | false | DataSourceType | DataSourceType
    classifier | false | string
    description | false | string
    icon | false | string
    id | false | string
    layers | false | integer
    name | false | string
    option | false | string
  dataSourceTypeId | false | integer(int64)
  expire | false | boolean
  id | false | integer(int64)
  labels | false | string
  modifyTime | false | string(date-time)
  modifyUser | false | string
  publishedVersionId | false | integer(int64)
  versionId | false | integer(int64)
  versions | false | array | DatasourceVersion
    comment | false | string
    connectParams | false | object
    createTime | false | string
    createUser | false | string
    datasourceId | false | integer
    parameter | false | string
    versionId | false | integer

Response parameters:

parameter name | parameter description | type | schema
data | | object
message | | string
method | | string
status | | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html
index d8ca6e7b7b1..f2e5c3498f1 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html
@@ -7,7 +7,7 @@ Filesystem | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Filesystem

    FsRestfulApi class

    create new Dir#

    Interface address:/api/rest_j/v1/filesystem/createNewDir

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new Dir

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | path | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    create new file#

    Interface address: /api/rest_j/v1/filesystem/createNewFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | path | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

delete dir or file#

    Interface address: /api/rest_j/v1/filesystem/deleteDirOrFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Delete a directory or file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | address | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    download#

    Interface address:/api/rest_j/v1/filesystem/download

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
charset | Charset | true | String | String
path | address | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    file info#

    Interface address:/api/rest_j/v1/filesystem/fileInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    File Information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | address | query | true | string
pageSize | page size | query | false | ref

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    format#

    Interface address:/api/rest_j/v1/filesystem/formate

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Format a result set file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
encoding | encoding | query | true | string
escapeQuotes | escapeQuotes | query | true | string
fieldDelimiter | Field Delimiter | query | true | string
hasHeader | Whether the first row is a header | query | true | boolean
quote | quote | query | true | string
path | address | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

get dir file trees#

    Interface address:/api/rest_j/v1/filesystem/getDirFileTrees

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the directory and file tree under the given path

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | request path | query | true | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/getDirFileTrees",    "status": 0,    "message": "OK",    "data": {        "dirFileTrees": {            "name": "",            "path": "",            "properties": null,            "children": [{                "name": "",                "path": "",                "properties": {                    "size": "",                    "modifytime": ""                },                "children": ,                "isLeaf": ,                "parentPath": ""            }],            "isLeaf": ,            "parentPath":        }    }}

    root path#

    Interface address:/api/rest_j/v1/filesystem/getUserRootPath

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get root path

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
pathType | FileType | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    does it exist#

    Interface address: /api/rest_j/v1/filesystem/isExist

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Whether it exists

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | address | query | true | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    open a file#

    Interface address: /api/rest_j/v1/filesystem/openFile

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Open file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | address | query | true | string
charset | Charset | query | false | string
page | page number | query | false | ref
pageSize | page size | query | false | ref

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": [{            "columnName": "_c0",            "comment": "NULL",            "dataType": ""        }],        "totalPage": ,        "totalLine": ,        "page": ,        "type": "",        "fileContent": [            [""]        ]    }}

open log#

    Interface address:/api/rest_j/v1/filesystem/openLog

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Open a log file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | address | query | false | string
proxyUser | Proxy User | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/openLog",    "status": 0,    "message": "OK",    "data": {        "log": ["", ""]    }}

    Rename#

    Interface address:/api/rest_j/v1/filesystem/rename

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rename the file

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
newDest | new name | false | String | String
oldDest | old name | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert the result set to Excel#

    Interface address: /api/rest_j/v1/filesystem/resultsetToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Convert the result set to Excel

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
autoFormat | Auto | query | false | boolean
charset | charset | query | false | string
csvSeerator | csv separator | query | false | string
limit | limit | query | false | ref
nullValue | null value | query | false | string
outputFileName | Output file name | query | false | string
outputFileType | Output file type | query | false | string
path | address | query | false | string
quoteRetouchEnable | Whether to enable quote retouch | query | false | boolean
sheetName | sheet name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert resultsets to Excel#

    Interface address:/api/rest_j/v1/filesystem/resultsetsToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    resultsets converted to Excel

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
autoFormat | Auto | query | true | boolean
limit | limit | query | true | ref
nullValue | null value | query | true | string
outputFileName | Output file name | query | true | string
path | address | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save the script#

    Interface address:/api/rest_j/v1/filesystem/saveScript

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save script

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
path | address | true | String | String
SaveScript | json body | true | SaveScript | SaveScript
charset | Charset | false | String | String
params | parameters | false | Object | Object
scriptContent | script content | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload#

    Interface address:/api/rest_j/v1/filesystem/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Upload files; multiple files can be uploaded at once

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
file | file | formData | false | ref
path | address | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html
index d0d6ca55120..3e8437be047 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html
@@ -7,7 +7,7 @@ Add Global Variable | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Add Global Variable

    VariableRestfulApi class

    add global variables#

    Interface address:/api/rest_j/v1/variable/saveGlobalVariable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add global variables

    Request example:

{    "globalVariables": [{        "keyID": null,        "key": "",        "valueID": null,        "value": ""    }]}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
globalVariables | Added parameter data, one-to-many; key: globalVariables, value: List | Map | true | Map
key | Parameter name, belongs to globalVariables | String | true | String
value | Variable value, paired with key under globalVariables | List | true | List

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/variable/saveGlobalVariable",    "status": 0,    "message": "OK",    "data": {}}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html
index bd78ec3251e..e7ffcf23593 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html
@@ -7,7 +7,7 @@ Admin Console Home Page Interface | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Admin Console Home Page Interface

    QueryRestfulApi class

    admin authentication#

    Interface address:/api/rest_j/v1/jobhistory/governanceStationAdmin

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Verifies whether the current user is an administrator; returns true if so, otherwise false

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

{    "data": {        "solution": null,        "admin": true    },    "message": "OK",    "method": "/api/jobhistory/governanceStationAdmin",    "status": 0}

    global history#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the global history list according to the given conditions; all records are returned by default

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | Creator | query | false | string
endDate | End time | query | false | integer(int64)
executeApplicationName | operator | query | false | string
isAdminView | Whether it is in administrator mode or normal mode | query | false | boolean
pageSize | page size | query | false | ref
proxyUser | Proxy User | query | false | string
startDate | Start time | query | false | integer(int64)
status | status | query | false | string
taskID | ID | query | false | integer(int64)
pageNow | page number | query | false | integer(int32)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

{    "method": "/api/jobhistory/list",    "status": 0,    "message": "OK",    "data": {        "solution": null,        "totalPage": 90,        "tasks": [{            "taskID": null,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": null,            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime": null,            "updatedTime": null,            "engineType": "",            "errCode": 0,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "datachecker",            "paramsJson": "",            "costTime": 1000,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [],            "canRetry": null,            "subJobs": null        }]    }}
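
Querying the global history with a few of the filters above might look like this; the base URL and cookie are assumed as before, and the status value is just an example.

import requests

BASE = "http://127.0.0.1:9001"                # assumed gateway address
COOKIES = {"bdp-user-ticket-id": "<ticket>"}  # assumed session cookie

resp = requests.get(
    f"{BASE}/api/rest_j/v1/jobhistory/list",
    params={"pageNow": 1, "pageSize": 20, "status": "FAILED"},  # example filters
    cookies=COOKIES,
)
data = resp.json()["data"]
print(data["totalPage"], [t["taskID"] for t in data["tasks"]])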

list undone#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

List of unfinished tasks

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | creator | query | false | string
endDate | End time | query | false | integer(int64)
engineType | engineType | query | false | string
pageNow | pageNow | query | false | ref
pageSize | pageSize | query | false | ref
startDate | Start time | query | false | ref
startTaskID | startTaskID | query | false | integer(int64)
status | status | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    History details#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the detailed information of a history record by its ID

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | HistoryId | query | false | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/jobhistory/1928730/get",    "status": 0,    "message": "OK",    "data": {        "task": {            "taskID": ,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": "",            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime":,            "updatedTime": ,            "engineType": "",            "errCode": ,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "hql",            "paramsJson": "",            "costTime": ,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [""],            "canRetry": false,            "subJobs": null        }    }}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html
index 148b6549dcc..cbc8dd139cf 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html
@@ -7,7 +7,7 @@ Instance Management | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Instance Management

    InstanceRestful class

    Microservice instance list#

    Interface address: /api/rest_j/v1/microservice/allInstance

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the list of microservice management module instances; one or more can be returned, all by default

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "instances": [{            "id": ,            "updateTime": ,            "createTime": ,            "applicationName": ",            "instance": "",            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "id": 5,                "labelValueSize": 0,                "modifiable": true,                "updateTime": ,                "createTime": ,                "featureKey": "",                "empty":            }]        }]    }}

    Get eurekaURL#

    Interface address: /api/rest_j/v1/microservice/eurekaURL

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Return the eurekaURL

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "url": ""    }}

    Edit the microservice instance#

    Interface address: /api/rest_j/v1/microservice/instanceLabel

    Request method: PUT

    Request data type: application/json

    Response data type: */*

    Interface description:

    Edit or modify the instance in microservice management

    Request example:

{    "applicationName": "linkis-ps-cs",    "instance": "bdpdws110004:9108",    "labels": [{        "labelKey": "route",        "stringValue": "cs_2_dev"    }]}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
applicationName | Engine Label | String | false | String
instance | Engine instance name | String | false | String
labelKey | Label key in the added content, i.e. the key of a map in the labels collection | String | false | String
labels | Content to update on the instance; a collection of maps | List | false | List
stringValue | Label value in the added content, i.e. the value of a map in the labels collection | String | false | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "success",    "data": {        "labels": [{            "stringValue": "",            "labelKey": "",            "feature": null,            "modifiable": ,            "featureKey": "",            "empty":        }]    }}

    Modifiable label types#

    Interface address:/api/rest_j/v1/microservice/modifiableLabelKey

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of label types that can be modified, such as 'userCreator, route'

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {    "keyList": []    }}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html
index 7507329a1f2..7968b382b73 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html
@@ -7,7 +7,7 @@ History Job Interface | Apache Linkis
@@ -16,7 +16,7 @@ none

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "admin": true    }}

    getHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

Interface description: Get the details of a history task by its ID

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | id | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    listHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
startDate | startDate | path | false | Long
endDate | endDate | path | false | Long
status | status | path | false | string
pageNow | pageNow | path | false | Integer
pageSize | pageSize | path | false | Integer
taskID | taskID | path | false | Long
executeApplicationName | executeApplicationName | path | false | string
creator | creator | path | false | string
proxyUser | proxyUser | path | false | string
isAdminView | isAdminView | path | false | Boolean

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}

    listUndoneHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
startDate | startDate | path | false | Long
endDate | endDate | path | false | Long
status | status | path | false | string
pageNow | pageNow | path | false | Integer
pageSize | pageSize | path | false | Integer
startTaskID | startTaskID | path | false | Long
engineType | engineType | path | false | string
creator | creator | path | false | string

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "Running",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "Running",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}
\ No newline at end of file
diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html
index 90e9c1470e0..48950b37a74 100644
--- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html
+++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html
@@ -7,7 +7,7 @@ Linkis Error Codes | Apache Linkis
@@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Linkis Error Codes

    LinkisErrorCodeRestful class

    Get Linkis error code#

    Interface address:/api/rest_j/v1/errorcode/getAllErrorCodes

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Linkis error code list

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html index cde62f579f5..f71484ef032 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html @@ -7,7 +7,7 @@ Mdq Table Interface | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Mdq Table Interface

    MdqTableRestfulApi class

    Activate table operations#

    Interface address:/api/rest_j/v1/datasource/active

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Activate table operation

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
tableId | Table ID | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Generate the DDL statement for the new library table#

    Interface address:/api/rest_j/v1/datasource/displaysql

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Generate DDL statement for new library table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
table | Table | String | false | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get partition statistics#

    Interface address:/api/rest_j/v1/datasource/getPartitionStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get partition statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
partitionSort | Partition Sort | String | false | String
tableName | table name | query | false | string
partitionPath | partitionPath | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table information#

    Interface address:/api/rest_j/v1/datasource/getTableBaseInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table field information#

    Interface address:/api/rest_j/v1/datasource/getTableFieldsInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table field information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table statistics#

    Interface address:/api/rest_j/v1/datasource/getTableStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
pageNow | page number | query | false | string
pageSize | page size | query | false | string
partitionSort | Partition Sort | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Active ID#

    Interface address:/api/rest_j/v1/datasource/persistTable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Set the activation flag (persist the table information)

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
table | Table | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html index ea01e86f662..d2559a1795f 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html @@ -7,7 +7,7 @@ MetadataCoreRestful | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
system | system | query | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

Interface description: Get the partition information of the data table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string
traverse | traverse | query | false | boolean

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

Interface description: Get the properties of the data table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html index 989366f91ce..f31ae6e0253 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html @@ -7,7 +7,7 @@ Parameter Configuration | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Parameter Configuration

    ConfigurationRestfulApi class

    Add KeyForEngine#

    Interface address:/api/rest_j/v1/configuration/addKeyForEngine

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Add KeyForEngine

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
engineType | engineType | query | false | string
keyJson | keyJson | query | false | string
token | token | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Add application type#

    Interface address:/api/rest_j/v1/configuration/createFirstCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add application type tag

    Request example:

    {    "categoryName": "",    "description": ""}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryName | Reference type label name | false | String | String
description | Description | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createFirstCategory",    "status": 0,    "message": "OK",    "data": {}}

    Add parameter configuration#

    Interface address:/api/rest_j/v1/configuration/createSecondCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add parameter configuration

    Request example:

{
    "categoryId": "",
    "description": "",
    "engineType": "",
    "version": ""
}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryId | Parameter Configuration Id | true | String | String
description | Description | true | String | String
engineType | Engine Type | true | String | String
version | version number | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createSecondCategory",    "status": 0,    "message": "OK",    "data": {}}

    delete configuration#

    Interface address: /api/rest_j/v1/configuration/deleteCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete parameter configuration

    Request example:

{
    "categoryId": ""
}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | Parameter Configuration Id | String | true | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/deleteCategory",    "status": 0,    "message": "OK",    "data": {}}

    Engine type list#

    Interface address:/api/rest_j/v1/configuration/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of engine types

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/engineType",    "status": 0,    "message": "OK",    "data": {    "engineType": []    }}

    App types#

    Interface address: /api/rest_j/v1/configuration/getCategory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Get the application type tags used in the parameter configuration

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/getCategory",    "status": 0,    "message": "OK",    "data": {        "Category": [{            "categoryId": ,            "labelId": ,            "categoryName": "",            "childCategory": [],            "description": null,            "tag": null,            "createTime": ,            "updateTime": ,            "level": ,            "fatherCategoryName": ""        }],        "description": null,        "tag": null,        "createTime": ,        "updateTime": ,        "level": ,        "fatherCategoryName":    }]}}

    queue resources#

    Interface address:/api/rest_j/v1/configuration/getFullTreesByAppName

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Return the columns and values of the queue resource module in the parameter configuration

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | label name | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/getFullTreesByAppName",    "status": 0,    "message": "OK",    "data": {        "fullTree": [{            "name": "Queue Resource",            "description": null,            "settings": [{                "id": ,                "key": "",                "description": "",                "name": "",                "defaultValue": "",                "validateType": "",                "validateRange": "[]",                "level": 1,                "engineType": ,                "treeName": "",                "valueId": ,                "configValue": "",                "configLabelId": ,                "unit": null,                "isUserDefined": ,                "hidden": ,                "advanced":            }]        }]    }}

    Get key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get key value

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
configKey | configKey | query | true | string
creator | creator | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
configValue | configValue | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String
SaveKeyValue | json body | true | SaveKeyValue | SaveKeyValue

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Delete key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content
401 | Unauthorized
403 | Forbidden

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rpc test#

    Interface address: /api/rest_j/v1/configuration/rpcTest

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    rpc test

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | creator | query | false | string
engineType | engineType | query | false | string
username | username | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Save queue resources#

    Interface address:/api/rest_j/v1/configuration/saveFullTree

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save queue resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | App Type Name | String | true | String
description | Description, belonging to the content in fullTree | String | true | String
engineType | Engine Type | String | true | String
fullTree | Details under Application Type | List | true | List
name | Queue resource name, which belongs to the content in fullTree | String | true | String
settings | Detailed content in the queue resource, belonging to the content in fullTree | List | true | List

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/saveFullTree",    "status": 0,    "message": "OK",    "data": {}}

    Update category information#

    Interface address: /api/rest_j/v1/configuration/updateCategoryInfo

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update category information

Request example:

    {    description: "",    categoryId:}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | categoryId | String | true | String
description | description | String | true | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/updateCategoryInfo",    "status": 0,    "message": "OK",    "data": {}}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html b/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html index 5126fd55903..dfeba76b3f9 100644 --- a/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html +++ b/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html @@ -7,7 +7,7 @@ UDF Operations Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    UDF Operations Management

    UDFApi class

    new#

    Interface address:/api/rest_j/v1/udf/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a new UDF

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
clusterName | clusterName | false | String | String
createTime | CreateTime | false | Date | Date
createUser | Creator | false | String | String
description | Description | false | String | String
directory | Category, personal function first-level directory | false | String | String
isExpire | is invalid | false | Boolean | Boolean
isLoad | Whether to load | false | Boolean | Boolean
isShared | Shared | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
sys | sys | false | String | String
treeId | treeId | false | Long | Long
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
updateTime | Update time | false | Date | Date
useFormat | Use Format | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf tree menu#

    Interface address:/api/rest_j/v1/udf/all

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get detailed information of udf tree menu

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
path | Request Path | false | String | String
jsonString | jsonString | false | string | string

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get udf user list#

    Interface address:/api/rest_j/v1/udf/allUdfUsers

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get udf user list

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    confirmed#

    Interface address: /api/rest_j/v1/udf/authenticate

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Authenticate (verify that the user's identity is genuine)

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    delete#

    Interface address:/api/rest_j/v1/udf/delete/{id}

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf file download to local#

    Interface address:/api/rest_j/v1/udf/downloadToLocal

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download UDF file to local according to version parameters

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF View source code#

    Interface address:/api/rest_j/v1/udf/downloadUdf

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF view source code

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Publish#

    Interface address:/api/rest_j/v1/udf/publish

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Publish a UDF version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    fallback version#

    Interface address:/api/rest_j/v1/udf/rollback

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Roll back a UDF to the specified version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    set expiration#

    Interface address:/api/rest_j/v1/udf/setExpire

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Set the UDF as expired

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | Long | Long

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF sharing#

    Interface address: /api/rest_j/v1/udf/shareUDF

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF sharing

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
sharedUsers | sharedUsers | false | List | List
udfInfo | udfInfo | false | UDFInfo | UDFInfo

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree new#

    Interface address:/api/rest_j/v1/udf/tree/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a UDF tree node

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree delete#

    Interface address:/api/rest_j/v1/udf/tree/delete/{id}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    tree delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree update#

    Interface address:/api/rest_j/v1/udf/tree/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    tree update

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    renew#

    Interface address:/api/rest_j/v1/udf/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF modification

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
description | Description | false | String | String
id | id | false | Long | Long
isLoad | Whether to load | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
useFormat | Use Format | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get user directory#

    Interface address: /api/rest_j/v1/udf/userDirectory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the first-level classification of the user's personal function

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | Get the user directory of the specified collection type; if the type is UDF, get the user directory under this type | false | string | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    version list#

    Interface address:/api/rest_j/v1/udf/versionList

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    View version list

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/jdbc_api/index.html b/docs/1.1.3/api/jdbc_api/index.html index 8f7c891d436..549418c22e5 100644 --- a/docs/1.1.3/api/jdbc_api/index.html +++ b/docs/1.1.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ Task Submission And Execution Of JDBC API | Apache Linkis - + @@ -19,7 +19,7 @@ //3. Create statement and execute query Statement st= connection.createStatement(); ResultSet rs=st.executeQuery("show tables"); //4. Processing the returned results of the database (using the ResultSet class) while (rs.next()) { ResultSetMetaData metaData = rs.getMetaData(); for (int i = 1; i <= metaData.getColumnCount(); i++) { System.out.print(metaData.getColumnName(i) + ":" +metaData.getColumnTypeName(i)+": "+ rs.getObject(i) + " "); } System.out.println(); } // close resourse rs.close(); st.close(); connection.close(); }
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/linkis_task_operator/index.html b/docs/1.1.3/api/linkis_task_operator/index.html index b7504b253f7..12eed242b20 100644 --- a/docs/1.1.3/api/linkis_task_operator/index.html +++ b/docs/1.1.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ Task Submission and Execution Rest Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Linkis Task submission and execution Rest API document

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: return status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returned is an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit task#

    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    • Request Parameters

    {  "executionContent": {    "code": "show tables",    "runType": "sql"  },  "params": {    "variable": {// task variable       "testvar": "hello"     },    "configuration": {      "runtime": {// task runtime params         "jdbc.url": "XX"      },      "startup": { // ec start up params         "spark.executor.cores": "4"      }    }  },  "source": { //task source information    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "spark-2.4.3",    "userCreator": "hadoop-IDE"  }}

• Sample Response

    { "method": "/api/rest_j/v1/entrance/submit", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
• execID is the unique execution ID generated for a task once it has been submitted to Linkis. It is of type String and is only meaningful while the task is running, similar to the concept of a PID. The execID is composed as: (requestApplicationName length)(executeApplicationName length)(instance length)${requestApplicationName}${executeApplicationName}${entranceInstance ip+port}${requestApplicationName}_${umUser}_${index}

    • taskID is the unique ID that represents the task submitted by the user. This ID is generated by the database self-increment and is of Long type
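To tie the request body and the two IDs together, here is a hedged end-to-end Python sketch of submitting this task; the gateway address and the authenticated requests.Session are assumptions:

    import requests

    LINKIS_GATEWAY = "http://127.0.0.1:9001"  # assumption: adjust to your gateway address
    session = requests.Session()              # assumption: login cookie already present

    job = {
        "executionContent": {"code": "show tables", "runType": "sql"},
        "params": {
            "variable": {"testvar": "hello"},
            "configuration": {
                "runtime": {},
                "startup": {"spark.executor.cores": "4"},
            },
        },
        "source": {"scriptPath": "file:///tmp/hadoop/test.sql"},
        "labels": {"engineType": "spark-2.4.3", "userCreator": "hadoop-IDE"},
    }
    resp = session.post(f"{LINKIS_GATEWAY}/api/rest_j/v1/entrance/submit", json=job)
    data = resp.json()["data"]
    exec_id, task_id = data["execID"], data["taskID"]
    print("submitted:", exec_id, task_id)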

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

• The request parameter fromLine is the line number from which to start reading, and size is the maximum number of log lines this request returns

    • Sample Response, where the returned fromLine needs to be used as a parameter for the next request of this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress and resource#

    • Interface /api/rest_j/v1/entrance/${execID}/progressWithResource

    • Submission method GET

    • Sample Response

    {  "method": "/api/entrance/exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2/progressWithResource",  "status": 0,  "message": "OK",  "data": {    "yarnMetrics": {      "yarnResource": [        {          "queueMemory": 9663676416,          "queueCores": 6,          "queueInstances": 0,          "jobStatus": "COMPLETED",          "applicationId": "application_1655364300926_69504",          "queue": "default"        }      ],      "memoryPercent": 0.009,      "memoryRGB": "green",      "coreRGB": "green",      "corePercent": 0.02    },    "progress": 0.5,    "progressInfo": [      {        "succeedTasks": 4,        "failedTasks": 0,        "id": "jobId-1(linkis-spark-mix-code-1946915)",        "totalTasks": 6,        "runningTasks": 0      }    ],    "execID": "exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2"  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}

    6. Get task info#

    • Interface /api/rest_j/v1/jobhistory/{id}/get

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | task id | path | true | string
    • Sample Response
    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    7. Get result set info#

    Support for multiple result sets

    • Interface /api/rest_j/v1/filesystem/getDirFileTrees

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | result directory | query | true | string
    • Sample Response
    {  "method": "/api/filesystem/getDirFileTrees",  "status": 0,  "message": "OK",  "data": {    "dirFileTrees": {      "name": "1946923",      "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923",      "properties": null,      "children": [        {          "name": "_0.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",//result set 1          "properties": {            "size": "7900",            "modifytime": "1657113288360"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        },        {          "name": "_1.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_1.dolphin",//result set 2          "properties": {            "size": "7900",            "modifytime": "1657113288614"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        }      ],      "isLeaf": false,      "parentPath": null    }  }}

    8. Get result content#

    • Interface /api/rest_j/v1/filesystem/openFile

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | result path | query | true | string
charset | Charset | query | false | string
page | page number | query | false | ref
pageSize | page size | query | false | ref
    • Sample Response
    {  "method": "/api/filesystem/openFile",  "status": 0,  "message": "OK",  "data": {    "metadata": [      {        "columnName": "count(1)",        "comment": "NULL",        "dataType": "long"      }    ],    "totalPage": 0,    "totalLine": 1,    "page": 1,    "type": "2",    "fileContent": [      [        "28"      ]    ]  }}

    9. Get Result by stream#

    Get the result as a CSV or Excel file

    • Interface /api/rest_j/v1/filesystem/resultsetToExcel

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
autoFormat | Auto | query | false | boolean
charset | charset | query | false | string
csvSeerator | csv Separator | query | false | string
limit | row limit | query | false | ref
nullValue | null value | query | false | string
outputFileName | Output file name | query | false | string
outputFileType | Output file type csv or excel | query | false | string
path | result path | query | false | string
quoteRetouchEnable | Whether to quote modification | query | false | boolean
sheetName | sheet name | query | false | string
    • Response
    binary stream
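    A minimal sketch of consuming this binary stream with Java 11's HttpClient; the gateway address, output path, and the assumption of an already logged-in client are ours:

    import java.net.URI;
    import java.net.URLEncoder;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Path;

    public class ResultDownloadExample {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient(); // replace with a logged-in client
            String gateway = "http://127.0.0.1:9001";       // assumed gateway address

            String url = gateway + "/api/rest_j/v1/filesystem/resultsetToExcel"
                    + "?path=" + URLEncoder.encode(
                            "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",
                            StandardCharsets.UTF_8)
                    + "&outputFileType=csv"
                    + "&outputFileName=result"
                    + "&charset=utf-8";

            HttpRequest req = HttpRequest.newBuilder().uri(URI.create(url)).GET().build();
            // The response body is a binary stream; write it straight to a local file.
            client.send(req, HttpResponse.BodyHandlers.ofFile(Path.of("result.csv")));
        }
    }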

    10. Compatible with 0.x task submission interface#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    • Request Parameters
    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {      "variable": {// task variable         "testvar": "hello"      },      "configuration": {        "runtime": {// task runtime params           "jdbc.url": "XX"        },        "startup": { // ec start up params           "spark.executor.cores": "4"        }      }    },    "source": { //task source information      "scriptPath": "file:///tmp/hadoop/test.sql"    },    "labels": {      "engineType": "spark-2.4.3",      "userCreator": "hadoop-IDE"    },    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Sample Response
    { "method": "/api/rest_j/v1/entrance/execute", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/login_api/index.html b/docs/1.1.3/api/login_api/index.html index f13c215d2e3..4d1dcba2f44 100644 --- a/docs/1.1.3/api/login_api/index.html +++ b/docs/1.1.3/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

    wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
    wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # Base DN configuration of the LDAP service

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

    wds.linkis.test.mode=true    # Open test mode
    wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Log In Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of a Linkis Restful interface follows this standard format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: returns status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returns an error message, and the data may have a stack field, which returns specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Sample Response
        {        "method": null,        "status": 0,        "message": "login successful(登录成功)!",        "data": {            "isAdmin": false,            "userName": ""        }     }

    Among them:

    • isAdmin: Linkis only has admin users and non-admin users. The only privilege of an admin user is viewing the historical tasks of all users in the Linkis management console.
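    A minimal login sketch in Java 11, assuming the gateway listens at 127.0.0.1:9001 (an assumption; adjust to your deployment). The CookieManager keeps the session cookie returned on login, so subsequent requests made through this client are authenticated:

    import java.net.CookieManager;
    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class LinkisLoginExample {
        public static void main(String[] args) throws Exception {
            // The CookieManager stores the session cookie for later requests
            HttpClient client = HttpClient.newBuilder()
                    .cookieHandler(new CookieManager())
                    .build();

            String body = "{\"userName\": \"hadoop\", \"password\": \"***\"}";
            HttpRequest login = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/user/login"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();

            HttpResponse<String> resp = client.send(login, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.body()); // expect "status": 0 on success
        }
    }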

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Sample Response

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Sample Response

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    - + \ No newline at end of file diff --git a/docs/1.1.3/api/overview/index.html b/docs/1.1.3/api/overview/index.html index 37e1e2e9813..17397512e1b 100644 --- a/docs/1.1.3/api/overview/index.html +++ b/docs/1.1.3/api/overview/index.html @@ -7,15 +7,15 @@ Overview | Apache Linkis - +
    Version: Next(1.1.3)

    Overview

    1. Document description#

    Linkis1.0 has been refactored and optimized on the basis of Linkis0.x, and it remains compatible with the 0.x interface. However, to prevent compatibility problems when using version 1.0, please read the following documents carefully:

    1. When using Linkis1.0 for customized development, you need to use Linkis's authorization authentication interface. Please read Login API Document carefully.

    2. Linkis1.0 provides a JDBC interface. If you need to use JDBC to access Linkis, please read Task Submit and Execute JDBC API Document.

    3. Linkis1.0 provides the Rest interface. If you need to develop upper-level applications on the basis of Linkis, please read Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/commons/variable/index.html b/docs/1.1.3/architecture/commons/variable/index.html index 1e6af625aff..348495a3a4d 100644 --- a/docs/1.1.3/architecture/commons/variable/index.html +++ b/docs/1.1.3/architecture/commons/variable/index.html @@ -7,7 +7,7 @@ Custom Variable Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Custom Variable Design

    1. General#

    Requirements Background#

     Users want to be able to define some common variables when writing code and have them replaced during execution. For example, a user runs the same SQL in batches every day and needs to specify the partition time of the previous day. Writing this purely in SQL is complicated, but if the system provides a run_date variable, it becomes very convenient to use.

    Target#

    1. Support variable substitution of task code
    2. Support custom variables, support users to define custom variables in scripts and task parameters submitted to Linkis, support simple +, - and other calculations
    3. Preset system variables: run_date, run_month, run_today and other system variables

    2. Overall Design#

     During the execution of a Linkis task, custom variable substitution is carried out in Entrance, mainly by an Entrance interceptor before the task is submitted and executed. The interceptor parses the variables and expressions used in the code, completes the code replacement using the initial values of the custom variables passed in with the task, and produces the final executable code.

    2.1 Technical Architecture#

     The overall structure of custom variables is as follows. After the task is submitted, it goes through the variable replacement interceptor. First, all variables and expressions used in the code are parsed; they are then replaced with the initial values of the system and user-defined variables; finally, the parsed code is submitted to EngineConn for execution. As a result, the code that reaches the underlying engine already has all variables replaced.

    var_arc
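    To make the replacement step concrete, here is a minimal, self-contained sketch of the idea, not the actual Entrance interceptor code: it scans the code for ${name} and ${name±N} expressions and substitutes computed date values (variable names and offsets are illustrative):

    import java.time.LocalDate;
    import java.time.format.DateTimeFormatter;
    import java.util.Map;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class VariableSubstitutionSketch {
        // Matches ${name}, ${name+N} and ${name-N}
        private static final Pattern VAR = Pattern.compile("\\$\\{(\\w+)([+-]\\d+)?}");

        public static String substitute(String code, Map<String, LocalDate> vars,
                                        DateTimeFormatter fmt) {
            Matcher m = VAR.matcher(code);
            StringBuilder out = new StringBuilder();
            while (m.find()) {
                LocalDate base = vars.get(m.group(1));
                if (base == null) continue; // unknown variable: left untouched here
                int offset = m.group(2) == null ? 0 : Integer.parseInt(m.group(2));
                m.appendReplacement(out, base.plusDays(offset).format(fmt));
            }
            m.appendTail(out);
            return out.toString();
        }

        public static void main(String[] args) {
            DateTimeFormatter fmt = DateTimeFormatter.ofPattern("yyyyMMdd");
            // run_date defaults to the day before the current time
            Map<String, LocalDate> vars = Map.of("run_date", LocalDate.now().minusDays(1));
            System.out.println(substitute(
                    "select * from t where dt = '${run_date}' or dt = '${run_date-1}'",
                    vars, fmt));
        }
    }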

    3. Function introduction#

     The variable types supported by Linkis are divided into custom variables and built-in system variables. Built-in variables are predefined by Linkis and can be used directly. Different variable types support different calculation operators: strings support +; integers and decimals support +, -, *, /; dates support + and -.

    3.1 Built-in variables#

    The currently supported built-in variables are as follows:

    variable name | variable type | variable meaning | variable value example
    run_date | String | Data statistics time (the user can set it; defaults to the day before the current time). If yesterday's data is processed today, this is yesterday's date. Format: yyyyMMdd | 20180129
    run_date_std | String | Data statistics time in standard date format. Format: yyyy-MM-dd | 2018-01-29
    run_today | String | The day after run_date (the data statistics time). Format: yyyyMMdd | 20211210
    run_today_std | String | The day after run_date, standard format. Format: yyyy-MM-dd | 2021-12-10
    run_mon | String | The month of the data statistics time. Format: yyyyMM | 202112
    run_mon_std | String | The month of the data statistics time, standard format. Format: yyyy-MM | 2021-12
    run_month_begin | String | The first day of the month of the data statistics time. Format: yyyyMMdd | 20180101
    run_month_begin_std | String | The first day of the month of the data statistics time, standard format. Format: yyyy-MM-dd | 2018-01-01
    run_month_now_begin | String | The first day of the month of run_today. Format: yyyyMMdd | 20211201
    run_month_now_begin_std | String | The first day of the month of run_today, standard format. Format: yyyy-MM-dd | 2021-12-01
    run_month_end | String | The last day of the month of the data statistics time. Format: yyyyMMdd | 20180131
    run_month_end_std | String | The last day of the month of the data statistics time, standard format. Format: yyyy-MM-dd | 2018-01-31
    run_month_now_end | String | The last day of the month of run_today. Format: yyyyMMdd | 20211231
    run_month_now_end_std | String | The last day of the month of run_today, standard format. Format: yyyy-MM-dd | 2021-12-31
    run_quarter_begin | String | The first day of the quarter of the data statistics time. Format: yyyyMMdd | 20210401
    run_quarter_end | String | The last day of the quarter of the data statistics time. Format: yyyyMMdd | 20210630
    run_half_year_begin | String | The first day of the half year of the data statistics time. Format: yyyyMMdd | 20210101
    run_half_year_end | String | The last day of the half year of the data statistics time. Format: yyyyMMdd | 20210630
    run_year_begin | String | The first day of the year of the data statistics time. Format: yyyyMMdd | 20210101
    run_year_end | String | The last day of the year of the data statistics time. Format: yyyyMMdd | 20211231
    run_quarter_begin_std | String | The first day of the quarter of the data statistics time, standard format. Format: yyyy-MM-dd | 2021-10-01
    run_quarter_end_std | String | The last day of the quarter of the data statistics time, standard format. Format: yyyy-MM-dd | 2021-12-31
    run_half_year_begin_std | String | The first day of the half year of the data statistics time, standard format. Format: yyyy-MM-dd | 2021-07-01
    run_half_year_end_std | String | The last day of the half year of the data statistics time, standard format. Format: yyyy-MM-dd | 2021-12-31
    run_year_begin_std | String | The first day of the year of the data statistics time, standard format. Format: yyyy-MM-dd | 2021-01-01
    run_year_end_std | String | The last day of the year of the data statistics time, standard format. Format: yyyy-MM-dd | 2021-12-31

    details:

    1. run_date is the core built-in date variable. It supports a user-defined date; if not specified, it defaults to the day before the current system time.
    2. Definition of the other derived built-in date variables: all other built-in date variables are calculated relative to run_date. Once run_date changes, the other variable values change automatically. The other date variables do not support setting initial values and can only be changed by modifying run_date.
    3. Built-in variables support richer usage scenarios: ${run_date-1} is the day before run_date; ${run_month_begin-1} is the first day of the month before run_month_begin, where -1 means minus one month.
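    As a worked illustration of these rules (values derived from the table above; assume run_date = 20211231):

        ${run_date}          => 20211231
        ${run_date-1}        => 20211230   (minus one day)
        ${run_month_begin}   => 20211201
        ${run_month_begin-1} => 20211101   (minus one month)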

    3.2 Custom variables#

     What are custom variables? Variables that the user defines first and then uses. Currently, string, integer, and floating-point variables are supported: strings support +, and integers and floating-point numbers support +, -, *, /. User-defined variables do not conflict with the set-variable syntax supported by SparkSQL and HQL, but the same name is not allowed. How are custom variables defined and used? As follows:

    ## Defined in the code, specified before the task code
    sql type definition method:
    --@set f=20.1
    The python/shell types are defined as follows:
    #@set f=20.1
    Note: Only one variable can be defined on one line

    Variables are used directly in the code through ${varName expression}, for example ${f*2}
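    A complete illustrative snippet in SQL, reusing the f variable from above (the column alias is our own addition):

        --@set f=20.1
        select ${f*2} as doubled;
        -- after substitution, the engine receives: select 40.2 as doubled;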

    3.3 Variable scope#

    Custom variables in Linkis also have a scope, with the following precedence: variables defined in the script take priority over variables defined in the task parameters, which take priority over the built-in run_date variable. Task parameters are defined as follows:

    ## restful
    {
        "executionContent": {"code": "select \"${f-1}\";", "runType": "sql"},
        "params": {
            "variable": {"f": "20.1"},
            "configuration": {
                "runtime": {
                    "linkis.openlookeng.url": "http://127.0.0.1:9090"
                }
            }
        },
        "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.sql"},
        "labels": {
            "engineType": "spark-2.4.3",
            "userCreator": "hadoop-IDE"
        }
    }

    ## java SDK
    JobSubmitAction.builder
      .addExecuteCode(code)
      .setStartupParams(startupMap)
      .setUser(user) // submit user
      .addExecuteUser(user) // execute user
      .setLabels(labels)
      .setVariableMap(varMap) // set variables
      .build
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index 529974ef52b..daae7823f71 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Start engineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing task preparation phase of Linkis computing governance. It mainly includes the following steps. First, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager. Then LinkisManager initiates a request to EngineConnManager to start the EngineConn based on demands and label rules. Finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding an EngineConn

    1. LinkisManager receives the requests from client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. LinkisManager is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the Client's request for a new EngineConn, it first checks the validity of the request parameters. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels are used later in AM's creation process to find the ECM and record resource information, the request must carry the necessary Labels. At this stage, it must carry a UserCreatorLabel (for example: hadoop-IDE) and an EngineTypeLabel (for example: spark-2.4.3).

    2. Select an EngineConnManager (ECM)#

    ECM selection uses the Labels passed by the client to pick a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs using the Labels passed by the client and returns them ordered by label match degree. After obtaining the registered ECM list, selection rules are applied to these ECMs; at this stage, rules such as availability check, resource surplus, and machine load have been implemented. After the rules are applied, the ECM with the best label match, the most idle resources, and the lowest load is returned.

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM asks, by calling the EngineConnPluginServer service, how many resources the client's engine creation request will use. Here the resource request is encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it first finds the corresponding engine tag through the passed labels, and selects the EngineConnPlugin of the corresponding engine through the engine tag. Then EngineConnPlugin's resource generator calculates, from the engine startup parameters passed in by the client, the resources required to start the new EngineConn, and returns the result to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that must be implemented when connecting a new computing storage engine to Linkis. It mainly covers several capabilities that the EngineConn must provide during startup, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for a concrete example: SparkEngineConnPlugin.
    • EngineConnPluginServer: A microservice that loads all the EngineConnPlugins and externally provides the EngineConn resource generation and EngineConn startup command generation capabilities.
    • EngineConnResourceFactory: Calculates the total resources needed when the EngineConn starts, from the parameters passed in.
    • EngineConnLaunchBuilder: Generates a startup command for the EngineConn from the incoming parameters, for the ECM to start the engine.
    3. After AM obtains the engine resources, it calls the RM service to apply for resources. The RM service uses the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. It first judges whether the resources of the client corresponding to the Labels are sufficient, and then whether the resources of the ECM service are sufficient. If so, the resource application is approved, and the resources of the corresponding Labels are added or subtracted.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain the EngineConnLaunchRequest it encapsulates.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    Get the EngineConn type and corresponding version that actually needs to be started through the label information of EngineConnBuildRequest, get the EngineConnPlugin of the EngineConn type from the memory of EngineConnPluginServer, and convert the EngineConnBuildRequest into EngineConnLaunchRequest through the EngineConnLaunchBuilder of the EngineConnPlugin.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix-like systems; that is, only Linux systems can execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the Client side.

    After the startup script is executed, ECM monitors its execution status and execution log in real time. Once the exit status is non-zero, ECM immediately reports EngineConn startup failure to LinkisManager and the entire process is complete; otherwise, it keeps monitoring the log and status of the startup script until the script execution is complete.

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, the command-line parameters of the Java main method are used to encapsulate an EngineCreationContext containing the relevant label, startup, and parameter information, and the EngineConn is initialized through the EngineCreationContext to establish the connection between the EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor is initialized according to the actual usage scenario to provide service capabilities for subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can submit and execute SQL, PySpark, and Scala code, supporting the Client in submitting such code to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly, and wait for the EngineConn to exit. When the underlying engine corresponding to the EngineConn is abnormal, the maximum idle time is exceeded, the Executor finishes executing, or the user manually kills it, the EngineConn automatically ends and exits. A toy sketch of these three stages follows.
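    Below is a self-contained toy model of this three-stage lifecycle in Java; every class and method name is a schematic stand-in of ours, not an actual Linkis class:

    public class EngineConnLifecycleSketch {
        record EngineCreationContext(String engineType, String labels) {}

        static Object connectUnderlyingEngine(EngineCreationContext ctx) {
            // Stage 1: establish the session with the underlying engine
            // (for SparkEngineConn this would be building a SparkSession).
            return new Object();
        }

        static Runnable initExecutor(Object session) {
            // Stage 2: create the Executor that actually accepts and runs code.
            return () -> System.out.println("executing task against " + session);
        }

        public static void main(String[] args) throws InterruptedException {
            EngineCreationContext ctx = new EngineCreationContext("spark-2.4.3", "hadoop-IDE");
            Object session = connectUnderlyingEngine(ctx);
            Runnable executor = initExecutor(session);
            executor.run();

            // Stage 3: heartbeat loop until exit conditions are met (engine failure,
            // max idle time exceeded, execution finished, or user kill).
            for (int i = 0; i < 3; i++) {
                System.out.println("heartbeat -> LinkisManager");
                Thread.sleep(100);
            }
        }
    }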

    At this point, the process of adding a new EngineConn is basically complete. Finally, let's summarize:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html index 96417de9691..f533cc72dc8 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    EngineConn architecture design

    EngineConn: the engine connector, the actual connection unit between Linkis and the underlying computing storage engines. It maintains the session information with the actual engine and acts, on behalf of the other microservice modules, as the client that communicates with the engine.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    The ability to provide interactive computing tasks.

    Core class | Core function
    EngineConnTask | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service | Core function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn | Contains the specific information of EngineConn, such as its type and the specific connection information with the underlying computing storage engine
    EngineExecution | Provides the Executor creation logic
    EngineConnHook | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager | Provides related interfaces for creating and obtaining Executor
    ShutdownHook | Defines the operation of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class | Core function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the executor. The Executor is the actual executor in a computing scenario, responsible for submitting user code to EngineConn.

    Core class | Core function
    Executor | The actual computational logic execution unit; provides a top-level abstraction of the various capabilities of the engine
    EngineConnAsyncEvent | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent | Defines EngineConn-related synchronous events
    EngineConnAsyncListener | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener | Defines the EngineConn-related synchronous event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus | Defines the listener bus for EngineConn synchronous events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService | Provides the label reporting function
    ManagerService | Provides the function of information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed. You can interact with it through RPC requests to obtain its status, load, concurrency, and other basic metrics.

    Core Class | Core Function
    LogCache | Provides the log cache function
    AccessibleExecutor | The Executor that can be accessed and interacted with through RPC requests
    NodeHealthyInfoManager | Manages the Executor's health information
    NodeHeartbeatMsgManager | Manages the Executor's heartbeat information
    NodeOverLoadInfoManager | Manages the Executor's load information
    Listener | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock | Defines the Executor-level lock
    AccessibleService | Provides start-stop and status acquisition functions for the Executor
    ExecutorHeartbeatService | Provides heartbeat-related functions for the Executor
    LockService | Provides the lock management function
    LogService | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html index be18ac356e5..b6b1f092824 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html @@ -7,7 +7,7 @@ EngineConn History Features | Apache Linkis - + @@ -16,7 +16,7 @@ engineconn-history-02.png

    4. Data structure:#

    # EC information resource record table
    DROP TABLE IF EXISTS `linkis_cg_ec_resource_info_record`;
    CREATE TABLE `linkis_cg_ec_resource_info_record` (
        `id` INT(20) NOT NULL AUTO_INCREMENT,
        `label_value` VARCHAR(255) NOT NULL COMMENT 'ec labels stringValue',
        `create_user` VARCHAR(128) NOT NULL COMMENT 'ec create user',
        `service_instance` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'ec instance info',
        `ecm_instance` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'ecm instance info',
        `ticket_id` VARCHAR(100) NOT NULL COMMENT 'ec ticket id',
        `log_dir_suffix` varchar(128) COLLATE utf8_bin DEFAULT NULL COMMENT 'log path',
        `request_times` INT(8) COMMENT 'resource request times',
        `request_resource` VARCHAR(255) COMMENT 'request resource',
        `used_times` INT(8) COMMENT 'resource used times',
        `used_resource` VARCHAR(255) COMMENT 'used resource',
        `release_times` INT(8) COMMENT 'resource released times',
        `released_resource` VARCHAR(255) COMMENT 'released resource',
        `release_time` datetime DEFAULT NULL COMMENT 'released time',
        `used_time` datetime DEFAULT NULL COMMENT 'used time',
        `create_time` datetime DEFAULT CURRENT_TIMESTAMP COMMENT 'create time',
        PRIMARY KEY (`id`),
        KEY (`ticket_id`),
        UNIQUE KEY `label_value_ticket_id` (`ticket_id`, `label_value`)
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin;
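    A hypothetical query against this table, for example to inspect the most recent EC resource records of one user (the user name is illustrative):

    -- Recent EC resource records for user 'hadoop'
    SELECT label_value, service_instance, used_resource, release_time
    FROM linkis_cg_ec_resource_info_record
    WHERE create_user = 'hadoop'
    ORDER BY create_time DESC
    LIMIT 10;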

    5. Interface Design#

    Engine history management page API interface, refer to the document Add history engine page to the management console

    6. Non-functional design#

    6.1 Security#

    No security issues are involved; the restful interface requires login authentication

    6.2 Performance#

    Little impact on engine life cycle performance

    6.3 Capacity#

    The record table requires regular cleaning

    6.4 High Availability#

    Not involved

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index f86bdcfc3dc..a062bb99089 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core service | Core function
    EngineConnLaunchService | Contains core methods for generating EngineConn and starting the process
    BmlResourceLocalizationService | Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService | Reports its own healthy heartbeat to AM regularly
    ECMMetricsService | Reports its own metric status to AM regularly
    EngineConnKillService | Provides related functions to stop the engine
    EngineConnListService | Provides caching and management of engine-related functions
    EngineConnCallBackService | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html index 88245c8e334..9f36ae09f60 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html @@ -7,7 +7,7 @@ EngineConn Metrics reporting feature | Apache Linkis - + @@ -21,7 +21,7 @@ The callback method parses the resource, progress, and engine metrancs information in TaskRunningInfo and persists them respectively.

    engineconn-mitrics-2.png

    4. Data structure#

    The RPC protocol TaskRunningInfo has been added for this requirement; no DB table has been added

    5. Interface Design#

    No external interface

    6. Non-functional design:#

    6.1 Security#

    The RPC interface uses internal authentication and does not involve external security issues

    6.2 Performance#

    The two RPC interfaces were combined to reduce the number of reports and improve performance

    6.3 Capacity#

    The metrics information is small, with no capacity impact

    6.4 High Availability#

    Not involved

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 9fdd77db00c..a366a0a4c39 100644 --- a/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class | Core Function
    EngineConnLaunchService | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService | Responsible for generating engine resources
    EngineConnResourceService | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader dynamically loads engine connector plug-ins according to request parameters, with caching. The loading process consists of two parts: 1) plug-in resources, such as the main program package and program dependency packages, are loaded locally (not open). 2) plug-in resources are dynamically loaded from the local environment into the service process, for example, loaded into the JVM virtual machine through a class loader.

    Core Class | Core Function
    EngineConnPluginsResourceLoader | Loads engine connector plug-in resources
    EngineConnPluginsLoader | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service dedicated to caching loaded engine connectors, supporting read, update, and remove operations. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources. If changes are found, the plug-in is reloaded and the cache refreshed automatically.

    Core Class | Core Function
    EngineConnPluginCache | Caches loaded engine connector instances
    RefreshPluginCacheContainer | Engine connector that refreshes the cache regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core Class | Core Function
    EngineConnLaunchBuilder | Builds the engine connector launch request
    EngineConnFactory | Creates the engine connector
    EngineConnPlugin | The engine connector plug-in interface, including resources, commands, and instance construction methods
    EngineResourceFactory | The engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection holds the default engine connector plug-ins implemented against the plug-in interface defined by Linkis. It provides default engine connector implementations such as jdbc, spark, python, and shell. Users can refer to these implemented cases and implement more engine connectors based on their own needs.

    Core Class | Core Function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell | shell engine connector
    engineplugin-spark | spark engine connector
    engineplugin-python | python engine connector
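    To illustrate the plug-in pattern described above (resource generator, launch builder, default labels), here is a self-contained toy model in Java. The real interfaces live in linkis-engineconn-plugin-core and differ in detail; every name, method, and parameter below is illustrative only:

    import java.util.List;
    import java.util.Map;

    // Toy stand-ins for the core classes listed above
    interface EngineResourceFactory { int coresFor(Map<String, String> params); }
    interface EngineConnLaunchBuilder { String buildCommand(Map<String, String> params); }

    interface EngineConnPluginSketch {
        EngineResourceFactory resourceFactory();
        EngineConnLaunchBuilder launchBuilder();
        List<String> defaultLabels();
    }

    public class ShellPluginSketch implements EngineConnPluginSketch {
        public EngineResourceFactory resourceFactory() {
            // Resource generator: derive required resources from startup params
            return params -> Integer.parseInt(params.getOrDefault("shell.cores", "1"));
        }
        public EngineConnLaunchBuilder launchBuilder() {
            // Startup command generator: turn params into a launch command
            return params -> "bash " + params.getOrDefault("script", "engine.sh");
        }
        public List<String> defaultLabels() { return List.of("shell-1"); }

        public static void main(String[] args) {
            EngineConnPluginSketch plugin = new ShellPluginSketch();
            Map<String, String> params = Map.of("shell.cores", "2");
            System.out.println(plugin.resourceFactory().coresFor(params));   // 2
            System.out.println(plugin.launchBuilder().buildCommand(params)); // bash engine.sh
        }
    }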
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/entrance/index.html b/docs/1.1.3/architecture/computation_governance_services/entrance/index.html index 11e8cfe43c1..1698a6fe332 100644 --- a/docs/1.1.3/architecture/computation_governance_services/entrance/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and manage the life cycle of computing tasks, and can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of Entrance in Linkis0.X.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    The EntranceServer computing task submission portal service is the core service of Entrance, responsible for receiving, scheduling, tracking the execution status of, and managing the life cycle of Linkis execution tasks. It mainly converts task execution requests into schedulable Jobs, schedules them, applies for Executors to execute them, and manages job status, result sets, and logs.

    Core Class | Core Function
    EntranceInterceptor | The Entrance interceptor supplements the information of the incoming task, making the content of the task more complete. Supplementary information includes: database information supplement, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task.
    EntranceExecutorManager | Creates an Executor for the execution of an EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager | Responsible for job-related persistence operations, such as storing the result set path, job status changes, progress, etc., in the database
    ResultSetEngine | Responsible for storing the result set after the job runs, saved as a file to HDFS or a local storage directory
    LogManager | Responsible for storing job logs and managing log error codes
    Scheduler | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index 57fe99784cb..56f34366b9a 100644 --- a/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It threads through almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, from the submission of the user's computing task from the client to the return of the final results, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager:Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
      2. AppManager: Coordinates and manages all EngineConnManagers and EngineConns, handing the entire life cycle of an EngineConn (application, reuse, creation, switching, destruction) to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, it will provide label support for the routing and management capabilities of EngineConn and EngineConnManager across IDC and across clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or the client) initiates a Job request, and the job request information is simplified as follows (for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    2. After Linkis-Gateway receives the request, it determines the microservice name for routing and forwarding from the serviceName in the URI /api/rest_j/v1/${serviceName}/.+. Here Linkis-Gateway parses out the name as entrance and forwards the Job to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding instead of a random one.
    3. After Entrance receives the Job request, it first briefly verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread keeps taking computing tasks from the consumption queue in a FIFO manner. The current default grouping is Creator + User (that is, submission system + user). Therefore, even for the same user, computing tasks submitted by different systems use completely different consumption queues and consumer threads, fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out a computing task, it submits the task to Orchestrator, which officially enters the preparation phase. A minimal submission sketch follows.
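    As an illustration of the submission request above, here is a minimal Java 11 sketch; the gateway address and the assumption of an already logged-in client are ours, not from this document:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SubmitJobExample {
        public static void main(String[] args) throws Exception {
            HttpClient client = HttpClient.newHttpClient(); // replace with a logged-in client
            String body = """
                    {
                      "executionContent": {"code": "show tables", "runType": "sql"},
                      "labels": {
                        "engineType": "spark-2.4.3",
                        "userCreator": "hadoop-IDE"
                      }
                    }""";
            HttpRequest req = HttpRequest.newBuilder()
                    .uri(URI.create("http://127.0.0.1:9001/api/rest_j/v1/entrance/submit"))
                    .header("Content-Type", "application/json")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> resp = client.send(req, HttpResponse.BodyHandlers.ofString());
            System.out.println(resp.body()); // contains execID and taskID on success
        }
    }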

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn to which the computing task will be submitted for execution. The other is for Orchestrator to orchestrate the computing task submitted by Entrance, converting the user's computing request into a physical execution tree and handing it over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    How to define a reusable EngineConn? It is one that matches all the label requirements of the computing task and whose health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet the conditions are then sorted and selected according to the rules, and the best one is locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask result is required. Once that result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of individual nodes; once a node is canceled or fails to execute, the entire Physical tree would be marked as failed. At this time, StageExecTask(End) is needed to ensure that the Physical tree can both cancel the ExecTask that failed to execute and continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue to execute. This is the computing-strategy execution logic represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to many SQL parsing engines (such as Spark, Hive's SQL parser). But in fact, the orchestration capability of Linkis Orchestrator is realized based on the computing governance field for the different computing governance needs of users. The SQL parsing engine is a parsing orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves is the orchestration requirements that different computing strategies impose on computing tasks. For example, to be multi-active, Orchestrator will, for a computing task submitted by the user, compile a physical tree based on the "multi-active" computing strategy requirements, so as to submit this computing task to multiple clusters. In constructing the entire Physical tree, various possible abnormal scenarios have been fully considered, and they are all reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has adapted to Linkis, all the programming languages it supports can be orchestrated, while the SQL parsing engine only cares about the analysis and execution of SQL, and is only responsible for parsing a piece of SQL into one executable Physical tree, and finally calculate the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL. It supports splitting a user SQL that spans multiple computing engines (must be a computing engine that Linkis has docked) into multiple sub SQLs and submitting them to each corresponding engine during the execution phase. Finally, a suitable calculation engine is selected for summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable physical tree. Orchestrator submits the Physical tree to its Execution module and enters the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyze the dependencies of the Physical tree, and execute them sequentially from the leaf nodes according to the dependencies.
    • Reheater: Once the execution of a node in the Physical tree is completed, it triggers a reheat. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example: if a leaf node is detected to have failed and it supports retry (if the failure was caused by throwing a ReTryException), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added to the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution information of the computing task (such as status, progress, logs, result sets, etc.); a sketch of steps 2-4 follows this list.
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn will push the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the calculation task is written to storage media such as HDFS on the EngineConn side, and EngineConn returns only the result set path through RPC. Execution consumes the event and broadcasts the obtained result set path through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, the same logic triggers Execution to update the state of the corresponding ExecTask node in the Physical tree, so that the Physical tree continues to execute until the entire tree has finished. At that point, Execution broadcasts the completion status of the calculation task through ListenerBus.
    9. After the Listener that Entrance registered with Orchestrator consumes the state event, it updates the job state in JobHistory, and the entire task execution is completed.
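
    A minimal sketch of steps 2-4: submit asynchronously, return an execution ID at once, and let ExecTask poll status by that ID. All names here are illustrative assumptions, not the actual EngineConn implementation:

    import java.util.UUID;
    import java.util.concurrent.*;

    class MiniEngineConn {
        private final ExecutorService pool = Executors.newCachedThreadPool();
        private final ConcurrentMap<String, String> statuses = new ConcurrentHashMap<>();

        // Step 2: asynchronously submit to the underlying engine, return an ID immediately.
        String submit(Runnable computingTask) {
            String execId = UUID.randomUUID().toString();
            statuses.put(execId, "Running");
            pool.submit(() -> {
                try {
                    computingTask.run();
                    statuses.put(execId, "Succeed");
                } catch (RuntimeException e) {
                    statuses.put(execId, "Failed");
                }
            });
            return execId;
        }

        // Steps 3-4: the caller pulls the execution status with the ID.
        String status(String execId) {
            return statuses.getOrDefault(execId, "Unknown");
        }
    }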

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls Entrance to obtain the status of the computing task.
    2. Once the status flips to success, the client sends a request for job information to JobHistory and gets all the result set paths.
    3. The client then initiates a query-file-content request to PublicService with each result set path and obtains the content of the result set. A sketch of this polling loop follows.
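
    A minimal sketch of the client-side loop; JobClient and its methods are illustrative assumptions standing in for the Entrance, JobHistory and PublicService REST calls:

    interface JobClient {
        String status(String jobId);                          // poll Entrance
        java.util.List<String> resultSetPaths(String jobId);  // query JobHistory
        String readResult(String path);                       // query PublicService
    }

    class ResultFetcher {
        static void fetch(JobClient client, String jobId) throws InterruptedException {
            while (!"Succeed".equals(client.status(jobId))) {
                Thread.sleep(1000);                           // periodic polling
            }
            for (String path : client.resultSetPaths(jobId)) {
                System.out.println(client.readResult(path));
            }
        }
    }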

    At this point, the entire process of job submission -> preparation -> execution has been completed.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html b/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html index 4c280dc77cf..ce956800d06 100644 --- a/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#


    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of an SDK; a usage sketch follows the table below.

    Core Class | Core Function
    Action | Defines the requested attributes, methods and parameters included
    Result | Defines the properties of the returned result, the methods and parameters included
    UJESClient | Responsible for request submission, execution, status, results and related parameters acquisition
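
    A minimal sketch of how the three classes in the table above fit together; the interfaces and method signatures here are illustrative assumptions, not the actual SDK API:

    import java.util.Map;

    interface Action {                      // the requested attributes and parameters
        Map<String, Object> parameters();
    }

    interface Result {                      // the properties of the returned result
        boolean isSucceed();
        String output();
    }

    interface UJESClient {                  // request submission, execution, status, results
        Result execute(Action action);
    }

    class SdkDemo {
        static void run(UJESClient client, Action sqlAction) {
            Result result = client.execute(sqlAction);
            if (result.isSucceed()) {
                System.out.println(result.output());
            }
        }
    }
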
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common | Defines the parent class and interface of the instruction template parent class, the instruction analysis entity class, and the task submission and execution links
    Core | Responsible for parsing input, task execution and defining output methods
    Application | Call linkis-computation-client to perform tasks, and pull logs and final results in real time
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 366a5a3d75a..9f4ef996f83 100644 --- a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index e9be9a3354d..2e989383c98 100644 --- a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html index 65f0ecab798..4a8ea7cd507 100644 --- a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index dd438bc850f..9c429fc9f28 100644 --- a/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to a Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params. Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/overview/index.html b/docs/1.1.3/architecture/computation_governance_services/overview/index.html index 2b9f5d0b4c2..6271d186e45 100644 --- a/docs/1.1.3/architecture/computation_governance_services/overview/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html b/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html index 2ab4b3894d8..955d33150e7 100644 --- a/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html +++ b/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -18,7 +18,7 @@
    • The relevant interfaces of Linkis need to be able to identify the proxy user information based on the original UserName obtained, and use the proxy user to perform various operations, while recording the audit log, including the user's task execution operations and download operations
    • When a task is submitted for execution, the Entrance service needs to change the executing user to the proxy user

    5 Things to Consider & Note#

    • Users are divided into proxy users and non-proxy users; a proxy-type user cannot proxy to other users again
    • It is necessary to control the list of logged-in users and system users who can be proxied, to prohibit arbitrary proxying and avoid uncontrollable permissions. It is best to support configuration via database tables, so that changes take effect directly without restarting the service
    • Log files containing proxy user operations, such as proxy execution and function updates, should be recorded separately; all proxy user operations in PublicService are recorded in the log, which is convenient for auditing
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html b/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html index 40746dc1f0a..5924361da66 100644 --- a/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html b/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html index 2d50b17c14f..e9a3603a012 100644 --- a/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html +++ b/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/microservice_governance_services/overview/index.html b/docs/1.1.3/architecture/microservice_governance_services/overview/index.html index 557275cd281..7625d2f7d28 100644 --- a/docs/1.1.3/architecture/microservice_governance_services/overview/index.html +++ b/docs/1.1.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/overview/index.html b/docs/1.1.3/architecture/overview/index.html index 0b5be907e7a..7a977e52021 100644 --- a/docs/1.1.3/architecture/overview/index.html +++ b/docs/1.1.3/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library services, context services, data source services and public services that Linkis 0.X has provided.
    2. The microservice governance services are Spring Cloud Gateway, Eureka and Open Feign, already provided by Linkis 0.X, and Linkis 1.0 will also provide support for Nacos
    3. Computing governance services are the core focus of Linkis 1.0: from submission and preparation to execution, these three stages comprehensively upgrade Linkis' ability to perform control over user tasks.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of the Linkis 1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 0d05c62adb2..bee31e80100 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ Analysis of engin BML | Apache Linkis - + @@ -17,7 +17,7 @@ taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

    3) The actual writing of material files into the material library is completed by the upload method in the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to a set of List<MultipartFile> files are persisted to the material library's file storage system, and the properties data of the material file is stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

    MultipartFile p = files[0];
    String resourceId = (String) properties.get("resourceId");
    String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
    fileName = resourceId;
    // generatePath currently supports Local and HDFS paths; the composition rules of the paths
    // are determined by the generatePath implementations in LocalResourceHelper or HdfsResourceHelper
    String path = resourceHelper.generatePath(user, fileName, properties);
    StringBuilder sb = new StringBuilder();
    // The file size calculation and the writing of the file byte stream are implemented
    // by the upload method in LocalResourceHelper or HdfsResourceHelper
    long size = resourceHelper.upload(path, user, inputStream, sb, true);
    Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
    // Insert a record into the resource table linkis_ps_bml_resources
    long id = resourceDao.uploadResource(resource);
    // Add a new record to the resource version table linkis_ps_bml_resources_version; the version
    // number at this time is Constant.FIRST_VERSION.
    // In addition to the metadata of this version, the most important thing is to record the storage
    // location of this version's file, including the file path, start position, and end position.
    String clientIp = (String) properties.get("clientIp");
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
            resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
    versionDao.insertNewVersion(resourceVersion);

    After the above process is successfully executed, the material upload is truly completed; the UploadResult is then returned to the client and the status of this ResourceTask is marked as completed. If an exception occurs while uploading the material file, the status of this ResourceTask is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram


    If the table linkis_cg_engine_conn_plugin_bml_resources has a record matching the local material data, you need to use the data in EngineConnLocalizeResource to construct an EngineConnBmlResource object and update the metadata, such as the version number, file size and modification time, of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before this update, you need to complete the update-and-upload operation of the material file itself, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

    Inside the uploadToBml(localizeResource, resourceId) method, a bmlClient is constructed to request the material-resource update interface, namely:

    private val bmlClient = BmlClientFactory.createBmlClient()
    bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    First, the validity check of resourceId is completed, that is, checking whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception is thrown to the client, and the material update operation fails at the interface level.

    Therefore, the corresponding relationship of the resource data in the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources needs to be complete, otherwise an error will occur that the material file cannot be updated.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method obtains the maximum version number of the resourceId from the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material update will also fail, so the integrity of this data correspondence also needs to be strictly guaranteed.

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

    ResourceTask resourceTask = null;
    synchronized (resourceId.intern()) {
        resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
    }

    Inside the createUpdateTask method, the main functions implemented are:

    // Generate a new version for the material resource
    String lastVersion = getResourceLastVersion(resourceId);
    String newVersion = generateNewVersion(lastVersion);
    // Then construct the ResourceTask and maintain its state
    ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
    // The logic of the material update upload is completed by the versionService.updateVersion method
    versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

    ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
    InputStream inputStream = file.getInputStream();
    // Get the path of the resource
    String newVersion = params.get("newVersion").toString();
    String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
    // getResourcePath takes one record (limit 1) of the original path and then appends newVersion with "_":
    // select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
    // Upload the resource to HDFS or the local file system
    StringBuilder stringBuilder = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
    // Finally insert a new resource version record into the linkis_ps_bml_resources_version table
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
    versionDao.insertNewVersion(resourceVersion);
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html b/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html index 0d9aa6307ba..de56bf45c76 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  • Database Design#

    1. Resource information table (resource)
    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | UUID can be used for identification
    resource_location | The location where resources are stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time
    is_share | Whether to share | 0 means not to share, 1 means to share
    update_time | Last update time of the resource
    is_expire | Whether the resource expires
    expire_time | Record resource expiration time
    2. Resource version information table (resource_version)
    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Joint primary key
    version | The version of the resource file
    start_byte | Start byte of the resource file
    end_byte | End byte of the resource file
    size | Resource file size
    resource_location | Resource file placement location
    start_time | Record upload start time
    end_time | Record upload end time
    updater | Record update user
    3. Resource download history table (resource_download_history)
    Field | Function | Remarks
    resource_id | Record the resource_id of the downloaded resource
    version | Record the version of the downloaded resource
    downloader | Record the user who downloaded the resource
    start_time | Record the download start time
    end_time | Record the download end time
    status | Whether the download succeeded | 0 means success, 1 means failure
    err_msg | Log the failure reason | null means success, otherwise the failure reason is logged
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html index 9da0270a77b..c2223d8ae7b 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html @@ -7,7 +7,7 @@ CS Cleanup Interface Features | Apache Linkis - + @@ -44,7 +44,7 @@

    6. Non-functional design#

    6.1 Security#

    The restful interface requires login authentication and must be operated by an administrator; the administrator user is configured in the properties file.

    6.2 Performance#

    • The query ID interface searchContextIDByTime supports paging, so there is no performance impact
    • The clear-specified-ID interface clearAllContextByID limits the amount of data operated on, so there is no performance impact
    • The clear-by-time interface clearAllContextByTime may time out on the query if the queried time range is too large, but the task will not fail; the cleanup operation is a single operation and does not affect other queries

    6.3 Capacity#

    This requirement provides a time range query and batch cleaning interface, which requires the upper-layer application that uses ContextService to actively clean up data.

    6.4 High Availability#

    The interface reuses the high availability of the ContextService microservice itself.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html index a3668a12800..4d3d0f0ab4c 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index ad17260ab4c..169e61cd553 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The specific entity bean of ContextValue needs to use the annotation @keywordMethod on each get method whose value can serve as a keyword. For example, the getTableName method of Table must be annotated with @keywordMethod.

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that are annotated with keywordMethod, calls each such get method, and takes the toString value of the returned object, which is then parsed through user-selectable rules (separator-based and regular-expression-based) and stored in the keyword collection; see the sketch after the precautions below.

    Precautions:

    1. The annotation will be defined to the core module of cs

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword
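
    A minimal sketch of this keyword-parsing idea; the annotation and class names are illustrative assumptions, not the actual Linkis implementation:

    import java.lang.annotation.*;
    import java.lang.reflect.Method;
    import java.util.*;

    @Retention(RetentionPolicy.RUNTIME)
    @Target(ElementType.METHOD)
    @interface KeywordMethod {}

    class Table {
        private final String tableName;
        Table(String tableName) { this.tableName = tableName; }

        @KeywordMethod                 // marks this getter's value as a searchable keyword
        public String getTableName() { return tableName; }
    }

    class KeywordParser {
        // Scan all annotated, parameterless getters and collect their toString values.
        static Set<String> parse(Object bean) throws Exception {
            Set<String> keywords = new HashSet<>();
            for (Method m : bean.getClass().getMethods()) {
                if (m.isAnnotationPresent(KeywordMethod.class) && m.getParameterCount() == 0) {
                    Object value = m.invoke(bean);
                    if (value != null) {
                        // A separator rule; a regex rule could be applied here as well.
                        keywords.addAll(Arrays.asList(value.toString().split(",")));
                    }
                }
            }
            return keywords;
        }

        public static void main(String[] args) throws Exception {
            System.out.println(parse(new Table("db.test_table"))); // [db.test_table]
        }
    }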

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index 2edc62bb4e5..2372e690dd3 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the csid. The way of parsing is to obtain the information of each instance through the method of string cutting, and then use eureka to determine whether this micro-channel still exists through the instance information. Service, if it exists, send it to this microservice instance

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index d04ca437a68..66cb9675166 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, Gateway determines that the main Instance is invalid and forwards the request to the standby Instance for processing. After the HA module on the standby Instance verifies that the HAID is valid, it loads the instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index d95a2b251d0..4b4e5be6842 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and automatically update their metadata. We implement this with the listener pattern, together with a heartbeat mechanism that polls to maintain the metadata consistency of the context information.

    Client registration, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: The clients client1, client2, client3 and client4 register themselves and the CSKeys they want to monitor with the CS server through HTTP requests. The Service obtains the callback engine instance through the external interface and registers each client together with its corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKeys value to all registered clients through the heartbeat mechanism. If there is a client's heartbeat timeout, remove the client.
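
    A minimal sketch of the register/update/heartbeat flow described above; all names are illustrative assumptions, not the actual CS server implementation:

    import java.util.*;
    import java.util.concurrent.*;

    class CallbackEngine {
        private final Map<String, Set<String>> watched = new ConcurrentHashMap<>();
        private final Map<String, Map<String, String>> pending = new ConcurrentHashMap<>();
        private final Map<String, Long> lastHeartbeat = new ConcurrentHashMap<>();

        // Registration: remember the client and the CSKeys it wants to monitor.
        void register(String client, Set<String> csKeys) {
            watched.put(client, csKeys);
            pending.put(client, new ConcurrentHashMap<>());
            lastHeartbeat.put(client, System.currentTimeMillis());
        }

        // Update: ListenerBus delivers a CSKey change; queue it for interested clients.
        void onUpdate(String csKey, String value) {
            watched.forEach((client, keys) -> {
                if (keys.contains(csKey)) pending.get(client).put(csKey, value);
            });
        }

        // Heartbeat: return (and clear) the updates accumulated for this client.
        Map<String, String> heartbeat(String client) {
            lastHeartbeat.put(client, System.currentTimeMillis());
            Map<String, String> updates = pending.put(client, new ConcurrentHashMap<>());
            return updates == null ? Map.of() : updates;
        }

        // Eviction: remove clients whose heartbeat has timed out.
        void evict(long timeoutMs) {
            long now = System.currentTimeMillis();
            lastHeartbeat.entrySet().removeIf(e -> {
                boolean dead = now - e.getValue() > timeoutMs;
                if (dead) { pending.remove(e.getKey()); watched.remove(e.getKey()); }
                return dead;
            });
        }
    }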

    Listener UML class diagram#

    Interface: ListenerManager

    External: Provide ListenerBus for event delivery.

    Internal: provides a callback engine for specific event registration, access, update, and heartbeat processing logic

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index b7196db2b71..cdc79f04c2c 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html index c210876fbc0..b9b4262cddf 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

    3. Execution module: Filter out the results that match the conditions from the Cache. According to different query targets, there are three execution modes: Ruler, Fetcher and Match. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. The query conditions can be used to form more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support logical operations of or, and, and not

      1. Unary operation UnaryContextSearchCondition:

    Support logical operations of a single parameter, such as NotContextSearchCondition

    1. Binary operation BinaryContextSearchCondition:

    Support the logical operation of two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

    1. Each logical operation corresponds to an implementation class of the above subclass

    2. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: when constructing, if multiple ContextType, ContextScope, KeyWord, or contains/regex matches are declared at the same time, they are automatically connected by the And logical operation

    2. Support logical operations between Conditions that return new Conditions: And, Or and Not (considering the condition1.or(condition2) form, the top-level Condition interface is required to define the logical operation methods), as shown in the sketch after this list

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
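
    A minimal sketch of such composable conditions, mirroring the condition1.or(condition2) form; the names are illustrative assumptions, not the actual ContextSearch classes:

    interface Condition {
        boolean matches(ContextKeyValue kv);

        default Condition and(Condition other) { return kv -> matches(kv) && other.matches(kv); }
        default Condition or(Condition other)  { return kv -> matches(kv) || other.matches(kv); }
        default Condition not()                { return kv -> !matches(kv); }
    }

    record ContextKeyValue(String key, Object value) {}

    class ConditionDemo {
        public static void main(String[] args) {
            Condition contains = kv -> kv.key().contains("table");   // ContainsContextSearchCondition
            Condition regex    = kv -> kv.key().matches("db\\..*");  // RegexContextSearchCondition
            // (contains AND regex) OR NOT contains
            Condition combined = contains.and(regex).or(contains.not());
            System.out.println(combined.matches(new ContextKeyValue("db.table_a", 1))); // true
        }
    }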

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method is based on the cost of the Condition to optimize the Condition and convert it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
    1. The execution of these Conditions is actually a depth-first, left-to-right traversal of the tree. Moreover, according to the commutativity of the logical operations, the left-right order of a node's children in the Condition tree can be exchanged, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
    1. For each tree, the cost is calculated from the leaf node and collected to the root node, which is the final cost of the tree, and finally the tree with the smallest cost is obtained as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 on the left and 0.5 on the right, and how to adjust this will be considered later. (The reason for assigning weights is that in some cases the left-hand condition alone can already determine whether the entire combination matches, so the right-hand condition does not have to be executed in all cases, and its actual cost needs to be reduced by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost×Weight of all child nodes; the weight assignment logic is consistent with that of leaf nodes.

    Taking tree A and tree B as examples, calculate the costs of these two trees respectively. As shown in the figure below, the number in each node is Cost|Weight; assuming that the costs of the 5 simple conditions A, B, C, D and E are 10, 100, 50, 10, and 100 respectively, it can be concluded that the cost of tree B is less than that of tree A, so tree B is the better solution.

    1. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

        1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted as appropriate during implementation)

        2. According to the efficiency of historical queries, e.g. the throughput per unit time, the real-time Cost is obtained through continuous adjustment on the basis of the initial Cost. A sketch of this cost computation follows.
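
    A minimal sketch of the cost rule above (the cost of a non-leaf node is the sum of child Cost x Weight, with weight 1 on the left and 0.5 on the right); the names are illustrative assumptions:

    abstract class CostNode {
        abstract double cost();
    }

    class Leaf extends CostNode {
        final double baseCost;                 // e.g. computed by a CostCalculator
        Leaf(double baseCost) { this.baseCost = baseCost; }
        @Override double cost() { return baseCost; }
    }

    class Op extends CostNode {
        final CostNode left, right;
        Op(CostNode left, CostNode right) { this.left = left; this.right = right; }
        // Left weight 1.0; right weight 0.5, since the right side may be short-circuited.
        @Override double cost() { return left.cost() * 1.0 + right.cost() * 0.5; }
    }

    class CostDemo {
        public static void main(String[] args) {
            // Two orderings of the same two conditions A (cost 100) and B (cost 10):
            double tree1 = new Op(new Leaf(100), new Leaf(10)).cost();  // 105.0
            double tree2 = new Op(new Leaf(10), new Leaf(100)).cost();  // 60.0 -> cheaper plan
            System.out.println(tree1 + " vs " + tree2);
        }
    }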

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html b/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html index d302f04e690..a9e78b982ca 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html b/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html index 2485fa9b596..e799f1366eb 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Data Source Management Service Architecture

    Background#

    Both Exchangis 0.x and Linkis 0.x in earlier versions had integrated data source modules. In order to reuse the data source management capability, Linkis reconstructs the data source module based on linkis-datasource (refer to the related documents), and splits data source management into a data source management service and a metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

    1) Unified service startup and deployment by Linkis, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

    2) Management services with a graphical interface, provided through Linkis Web. The interface provides management services such as creating a data source, data source queries, data source updates, connectivity tests and so on;

    3) The service is stateless and can be deployed with multiple instances, so that the service is highly available. When the system is deployed, multiple instances can be deployed; each instance provides services independently to the outside world without interfering with the others, and all information is stored in the database for sharing;

    4) Full life cycle management of data sources, including creation, query, update, test, and expiration management;

    5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

    6) Functions provided by the Restful interface, a detailed list: data source type query, data source detailed information query, data source information query based on version, data source version query, get data source parameter list, multi-dimensional data source search, data source environment query and update, add data source, data source parameter configuration, data source expiration setting, data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

    1. The service is registered in the Linkis-Eureka-Service service and managed in a unified manner with other Linkis microservices. The client can reach the data source management service by connecting to the Linkis-GateWay-Service service, using the service name data-source-manager.

    2. The interface layer serves other applications through the Restful interface, providing additions, deletions and changes to data sources and data source environments, data source link and dual-link tests, and data source version management and expiration operations;

    3. The Service layer mainly manages the database and the material library, and permanently retains the relevant information of the data sources;

    4. The link test of a data source is done through the linkis metastore server service, which currently supports the mysql/es/kafka/hive services.

    Core Process#

    1. To create a new data source, the user is first obtained from the request to determine whether the user is valid. The next step is to verify the relevant field information of the data source: the data source name and data source type cannot be empty. The data source name is used to confirm whether the data source already exists; if it does not exist, it is inserted into the database, and the data source ID number is returned.

    2. To update a data source, the user is first obtained from the request to determine whether the user is valid. The next step is to verify the relevant field information of the new data source: the data source name and data source type cannot be empty. Whether the data source exists is confirmed according to the data source ID number; if it does not exist, an exception is returned. If it exists, it is further judged whether the user has update permission for the data source: only the administrator or the owner of the data source has permission to update. If the user has permission, the data source is updated and the data source ID is returned.

    3. To update the data source parameters, the user is first obtained from the request to determine whether the user is valid. The detailed data source information is obtained according to the passed data source ID, and then it is determined whether the user is the owner of the data source to be changed or an administrator. If so, the modified parameters are further verified, the parameters are updated after the verification passes, and the versionId is returned.

    Entity Object#

    Class Name | Description
    DataSourceType | Indicates the type of a data source
    DataSourceParamKeyDefinition | Declares data source property configuration definitions
    DataSource | Data source object entity class, including permission tags and attribute configuration definitions
    DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
    DataSourceParameter | Data source specific parameter configuration
    DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

    Table: linkis_ps_dm_datasource <--> Object: DataSource

    Serial Number | Column | Description
    1 | id | Data source ID
    2 | datasource_name | Data source name
    3 | datasource_desc | Data source detailed description
    4 | datasource_type_id | Data source type ID
    5 | create_identify | Create identify
    6 | create_system | System that created the data source
    7 | parameter | Data source parameters
    8 | create_time | Data source creation time
    9 | modify_time | Data source modification time
    10 | create_user | Data source create user
    11 | modify_user | Data source modify user
    12 | labels | Data source labels
    13 | version_id | Data source version ID
    14 | expire | Whether the data source is expired
    15 | published_version_id | Data source published version number

    Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

    Serial Number | Column | Description
    1 | id | Data source type ID
    2 | name | Data source type name
    3 | description | Data source type description
    4 | option | Type of data source
    5 | classifier | Data source type classifier
    6 | icon | Data source image display path
    7 | layers | Data source type hierarchy

    Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

    Serial Number | Column | Description
    1 | id | Data source environment ID
    2 | env_name | Data source environment name
    3 | env_desc | Data source environment description
    4 | datasource_type_id | Data source type ID
    5 | parameter | Data source environment parameters
    6 | create_time | Data source environment creation time
    7 | create_user | Data source environment create user
    8 | modify_time | Data source modification time
    9 | modify_user | Data source modify user

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column | Description
    1 | id | Key-value type ID
    2 | data_source_type_id | Data source type ID
    3 | key | Data source parameter key
    4 | name | Data source parameter name
    5 | default_value | Data source parameter default value
    6 | value_type | Data source parameter type
    7 | scope | Data source parameter scope
    8 | require | Whether the data source parameter is required
    9 | description | Data source parameter description
    10 | value_regex | Regular expression for the data source parameter
    11 | ref_id | Data source parameter association ID
    12 | ref_value | Data source parameter associated value
    13 | data_source | Data source
    14 | update_time | Update time
    15 | create_time | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column | Description
    1 | version_id | Data source version ID
    2 | datasource_id | Data source ID
    3 | parameter | The version parameters of the data source
    4 | comment | Comment
    5 | create_time | Create time
    6 | create_user | Create user
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html b/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html index c59f7a6e418..a84eb5fabc5 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Data Source Management Service Architecture

    Background#

    Both Exchangis 0.x and Linkis 0.x in earlier versions had integrated data source modules. In order to reuse the data source management capability, Linkis reconstructs the data source module based on linkis-datasource (refer to the related documents), and splits data source management into a data source management service and a metadata management service.

    This article mainly involves the MetaData Manager Server data source management service, which provides the following functions:

    1) Unified service startup and deployment by Linkis, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

    2) The service is stateless and deployed with multiple instances to achieve high availability. When the system is deployed, multiple instances can be deployed; each instance provides services independently to the outside world without interfering with the others, and all information is stored in the database for sharing;

    3) Full life cycle management of data sources, including creation, query, update, test, and expiration management;

    4) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided;

    5) Functions provided by the Restful interface, a detailed list: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1. The service is registered in the Linkis-Eureka-Service service and managed in a unified manner with other Linkis microservices. The client can reach the metadata management service by connecting to the Linkis-GateWay-Service service, using the service name metamanager.

    2. The interface layer provides database/table/partition information queries to other applications through the Restful interface;

    3. In the Service layer, the data source type is obtained from the data source management service through the data source ID number, and the concrete supported service is obtained through that type. The first supported services are mysql/es/kafka/hive;

    Core Process#

    1. The client enters a specified data source ID and obtains information through the restful interface. For example, to query the database list for the data source with ID 1, the url is http://<meta-server-url>/metadatamanager/dbs/1

    2. According to the data source ID, the data source service <data-source-manager> is accessed through RPC to obtain the data source type;

    3. According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned. A sketch of step 1 follows.
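
    A minimal sketch of step 1, assuming the placeholder base URL from the example above and omitting any authentication headers required by the gateway:

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    class MetadataQueryDemo {
        public static void main(String[] args) throws Exception {
            HttpClient http = HttpClient.newHttpClient();
            // Query the database list for the data source with ID 1
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create("http://meta-server-url/metadatamanager/dbs/1"))
                    .GET()
                    .build();
            HttpResponse<String> response = http.send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println(response.body());   // JSON list of databases
        }
    }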

    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/overview/index.html b/docs/1.1.3/architecture/public_enhancement_services/overview/index.html index 772e8a67b85..a2232214545 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/overview/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    PublicEnhencementService (PS) architecture design

    PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store class libraries that need to be used when the engine runs.

    Core Class | Core Function
    UploadService | Provide resource upload service
    DownloadService | Provide resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provide high-availability capabilities for ContextService
    ListenerManager | The ability to provide a message bus
    ContextSearch | Provides query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provide the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with functions related to querying Linkis historical tasks and displaying their progress and logs, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provide historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

Core Class | Core Function
UDFService | Provides user-defined function service
    - + \ No newline at end of file diff --git a/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html b/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html index 6b9922e1f36..df9e037484c 100644 --- a/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html +++ b/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

• Provides labeling capabilities for users. The user label is automatically added and taken into account when applying for an engine.

• Provides the label analysis module, which can parse a user's request into a set of labels.

• Provides node label management capabilities, mainly used for CRUD operations on node labels and for label resource management, which records the maximum, minimum, and used resources of a label.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/cluster_deployment/index.html b/docs/1.1.3/deployment/cluster_deployment/index.html index b52e76e3054..536d9b5c740 100644 --- a/docs/1.1.3/deployment/cluster_deployment/index.html +++ b/docs/1.1.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -26,7 +26,7 @@ Linux clear process sudo kill - 9 process number

4. Matters needing attention#

4.1 It is best to start all services at the beginning, because there are dependencies between services. If a service is missing and no corresponding backup can be found through Eureka, dependent services will fail to start and will not restart automatically; wait until the alternative service is added before shutting down the related services#

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html b/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html index f8e13b7728c..f518c3db47b 100644 --- a/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html +++ b/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Deploy Linkis without HDFS | Apache Linkis - + @@ -20,7 +20,7 @@ [INFO] Retrieving result-set, may take time if result-set is large, please do not exit program.============ RESULT SET 1 ============hello ############Execute Success!!!########
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html b/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html index 5e04cb46d5e..f612e398cb9 100644 --- a/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html +++ b/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that you can directly unzip the engine ZIP package exported by maven into this directory, for example by placing it in the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory.
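In linkis.properties, the two settings would then look like this (a sketch using the directory from the example below):

wds.linkis.engineconn.home=${LINKIS_HOME}/lib/linkis-engineconn-plugins
wds.linkis.engineconn.plugin.loader.store.path=${LINKIS_HOME}/lib/linkis-engineconn-plugins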

${LINKIS_HOME}/lib/linkis-engineconn-plugins:
└── hive
    └── dist
    └── plugin
└── spark
    └── dist
    └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

## dist directory
${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
└── hive
    └── dist
└── spark
    └── dist

## plugin directory
${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
└── hive
    └── plugin
└── spark
    └── plugin

    2.2 Configuration modification of management console (optional)#

The configuration of the Linkis 1.0 management console is managed according to engine labels. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the following tables:

linkis_configuration_config_key: Insert the keys and default values of the configuration parameters of the engine
linkis_manager_label: Insert the engine label, such as hive-1.2.1
linkis_configuration_category: Insert the catalog relationship of the engine
linkis_configuration_config_value: Insert the configuration that the engine needs to display

If it is an existing engine and a new version is being added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it.

    2.3 Engine refresh#

1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis 1.0 provides a way to load the engine without shutting down the server: simply send a request to the linkis-engineconn-plugin-server service through its RESTful interface, that is, to the actual IP and port of the deployed service. The request URL is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"}.
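For example, with curl (a sketch; replace ip:port with the actual address of the linkis-engineconn-plugin-server instance, and note that authentication may need to be handled at the gateway):

curl -X POST "http://ip:port/api/rest_j/v1/rpc/receiveAndReply" \
  -H "Content-Type: application/json" \
  -d '{"method":"/enginePlugin/engineConn/refreshAll"}'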

    2. Restart refresh: the engine catalog can be forced to refresh by restarting

### cd to the sbin directory, restart linkis-engineconn-plugin-server
cd /Linkis1.0.0/sbin
## Execute the linkis-daemon script
sh linkis-daemon.sh restart linkis-engine-plugin-server

3.Check whether the engine refresh is successful: if you encounter problems during the refresh process and need to confirm whether it succeeded, check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time when the refresh was triggered.
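For example (a sketch; the database name linkis is an assumption, use the MYSQL_DB you configured):

mysql -h ${MYSQL_HOST} -u ${MYSQL_USER} -p linkis \
  -e "SELECT * FROM linkis_engine_conn_plugin_bml_resources;"
# check the last_update_time column of the rows for your engine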

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/installation_hierarchical_structure/index.html b/docs/1.1.3/deployment/installation_hierarchical_structure/index.html index d95463ccb5f..101a45cda60 100644 --- a/docs/1.1.3/deployment/installation_hierarchical_structure/index.html +++ b/docs/1.1.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Installation directory structure

The directory structure of Linkis 1.0 is very different from that of the 0.X versions: in 0.X, every microservice had its own independent root directory. The main advantage of that structure was that microservices were easy to distinguish and could be managed individually, but it had some obvious problems:

1. There are too many microservice directories, making directory switching and management inconvenient
2. There is no unified startup script, which makes starting and stopping microservices troublesome
3. There are a large number of duplicate service configurations, and the same configuration often needs to be modified in many places
4. There are a large number of repeated lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

Therefore, in Linkis 1.0 we have greatly optimized and adjusted the installation directory structure: reducing the number of microservice directories, removing duplicated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

1.The bin folder is no longer provided for each microservice; it is now shared by all microservices.

The bin folder has become the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for all of Linkis, as well as independent start and stop of individual microservices by changing parameters.

2.A separate conf directory is no longer provided for each microservice; it is now shared by all microservices.

The conf folder contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, configuration specific to each microservice, which under normal circumstances users do not need to change.

3.The lib folder is no longer provided for each microservice; it is now shared by all microservices

The lib folder likewise contains two kinds of content: the common dependencies required by all microservices, and the special dependencies required by each individual microservice.

4.The log directory is no longer provided for each microservice; it is now shared by all microservices

    The Log directory contains log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

├── bin ──installation directory
│ ├── checkEnv.sh ── Environmental variable detection
│ ├── checkServices.sh ── Microservice status check
│ ├── common.sh ── Some public shell functions
│ ├── install-io.sh ── Used for dependency replacement during installation
│ └── install.sh ── Main script of Linkis installation
├── conf ──configuration directory
│ ├── application-eureka.yml
│ ├── application-linkis.yml ──Microservice general yml
│ ├── linkis-cg-engineconnmanager-io.properties
│ ├── linkis-cg-engineconnmanager.properties
│ ├── linkis-cg-engineplugin.properties
│ ├── linkis-cg-entrance.properties
│ ├── linkis-cg-linkismanager.properties
│ ├── linkis-computation-governance
│ │   └── linkis-client
│ │       └── linkis-cli
│ │           ├── linkis-cli.properties
│ │           └── log4j2.xml
│ ├── linkis-env.sh ──linkis environment properties
│ ├── linkis-et-validator.properties
│ ├── linkis-mg-gateway.properties
│ ├── linkis.properties ──linkis global properties
│ ├── linkis-ps-bml.properties
│ ├── linkis-ps-cs.properties
│ ├── linkis-ps-datasource.properties
│ ├── linkis-ps-publicservice.properties
│ ├── log4j2.xml
│ ├── proxy.properties (Optional)
│ └── token.properties (Optional)
├── db ──database DML and DDL file directory
│ ├── linkis_ddl.sql ──Database table definition SQL
│ ├── linkis_dml.sql ──Database table initialization SQL
│ └── module ──Contains DML and DDL files of each microservice
├── lib ──lib directory
│ ├── linkis-commons ──Common dependency package
│ ├── linkis-computation-governance ──The lib directory of the computing governance module
│ ├── linkis-engineconn-plugins ──lib directory of all EngineConnPlugins
│ ├── linkis-public-enhancements ──lib directory of public enhancement services
│ └── linkis-spring-cloud-services ──SpringCloud lib directory
├── logs ──log directory
│ ├── linkis-cg-engineconnmanager-gc.log
│ ├── linkis-cg-engineconnmanager.log
│ ├── linkis-cg-engineconnmanager.out
│ ├── linkis-cg-engineplugin-gc.log
│ ├── linkis-cg-engineplugin.log
│ ├── linkis-cg-engineplugin.out
│ ├── linkis-cg-entrance-gc.log
│ ├── linkis-cg-entrance.log
│ ├── linkis-cg-entrance.out
│ ├── linkis-cg-linkismanager-gc.log
│ ├── linkis-cg-linkismanager.log
│ ├── linkis-cg-linkismanager.out
│ ├── linkis-et-validator-gc.log
│ ├── linkis-et-validator.log
│ ├── linkis-et-validator.out
│ ├── linkis-mg-eureka-gc.log
│ ├── linkis-mg-eureka.log
│ ├── linkis-mg-eureka.out
│ ├── linkis-mg-gateway-gc.log
│ ├── linkis-mg-gateway.log
│ ├── linkis-mg-gateway.out
│ ├── linkis-ps-bml-gc.log
│ ├── linkis-ps-bml.log
│ ├── linkis-ps-bml.out
│ ├── linkis-ps-cs-gc.log
│ ├── linkis-ps-cs.log
│ ├── linkis-ps-cs.out
│ ├── linkis-ps-datasource-gc.log
│ ├── linkis-ps-datasource.log
│ ├── linkis-ps-datasource.out
│ ├── linkis-ps-publicservice-gc.log
│ ├── linkis-ps-publicservice.log
│ └── linkis-ps-publicservice.out
├── pid ──Process ID of all microservices
│ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
│ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
│ ├── linkis_cg-entrance.pid ──Engine entrance microservice
│ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
│ ├── linkis_mg-eureka.pid ──eureka microservice
│ ├── linkis_mg-gateway.pid ──gateway microservice
│ ├── linkis_ps-bml.pid ──material library microservice
│ ├── linkis_ps-cs.pid ──Context microservice
│ ├── linkis_ps-datasource.pid ──Data source microservice
│ └── linkis_ps-publicservice.pid ──public microservice
└── sbin ──microservice start and stop script directory
    ├── ext ──Start and stop script directory of each microservice
    ├── linkis-daemon.sh ── Quick start/stop and restart of a single microservice
    ├── linkis-start-all.sh ── Start all microservice scripts with one click
    └── linkis-stop-all.sh ── Stop all microservice scripts with one click

    Configuration item modification

After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration, please refer to the article "Linkis1.0 Installation".

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

├── linkis-cg-engineconnmanager ──engine management service
├── linkis-cg-engineplugin ──EngineConnPlugin management service
├── linkis-cg-entrance ──computing governance entrance service
├── linkis-cg-linkismanager ──computing governance management service
├── linkis-mg-eureka ──microservice registry service
├── linkis-mg-gateway ──Linkis gateway service
├── linkis-ps-bml ──material library service
├── linkis-ps-cs ──context service
├── linkis-ps-datasource ──data source service
└── linkis-ps-publicservice ──public service

    Microservice abbreviation:

Abbreviation | Full English Name
cg | Computation Governance
mg | Microservice Governance
ps | Public Enhancement Service

In the past, starting and stopping a single microservice required entering the bin directory of each microservice and executing its start/stop script; with many microservices this was troublesome and added a lot of extra directory switching. Linkis 1.0 places all scripts related to starting and stopping microservices in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

3.Start a single microservice (remove the linkis- prefix from the service name, e.g. mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html b/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html index 8017bf6e4f1..1ba8da23621 100644 --- a/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html +++ b/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html @@ -7,7 +7,7 @@ Involve Knife4j into Linkis | Apache Linkis - + @@ -21,7 +21,7 @@

For detailed usage guidelines, please visit the knife4j official website: https://doc.xiaominfo.com/knife4j/

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html b/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html index 9e2b9a3e947..cf0484bb72a 100644 --- a/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html +++ b/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html @@ -7,7 +7,7 @@ Involve Prometheus into Linkis | Apache Linkis - + @@ -31,7 +31,7 @@ Then you can view one living dashboard of Linkis there.

You can also try to integrate Prometheus Alertmanager with your own webhook, where you can see whether the alert message is fired.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html b/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html index 51e79833795..9b8813829ec 100644 --- a/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh

    4. Result display#

The SkyWalking UI listens on port 8080 by default. After starting Linkis with SkyWalking enabled and opening the UI, if you can see the following picture, the integration succeeded.

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/linkis_scriptis_install/index.html b/docs/1.1.3/deployment/linkis_scriptis_install/index.html index 1873b5ba685..c6d9daac3a2 100644 --- a/docs/1.1.3/deployment/linkis_scriptis_install/index.html +++ b/docs/1.1.3/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ Installation and deployment of the tool scriptis | Apache Linkis - + @@ -26,7 +26,7 @@

    After modifying the configuration, reload the nginx configuration

    sudo nginx -s reload

    Note the difference between root and alias in the location configuration block in nginx

    • The processing result of root is: root path + location path.
    • The result of alias processing is: replace the location path with the alias path.
• alias defines a directory alias, while root defines the top-level directory
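For example, a minimal sketch (the dist path below is an illustrative assumption):

location /scriptis {
    # with alias, a request for /scriptis/index.html is served from
    # /appcom/Install/scriptis-web/dist/index.html (the location path is replaced)
    alias /appcom/Install/scriptis-web/dist;
    index index.html;
}

Had root been used instead of alias, the same request would resolve to /appcom/Install/scriptis-web/dist/scriptis/index.html (root path + location path).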

    5 scriptis usage steps#

    5.1 Log in to Linkis console normally#

#http://10.10.10.10:8080/#/
http://nginxIp:port/#/

Because access to scriptis requires login verification, you need to log in first to obtain and cache the cookie.
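The cookie can also be obtained from the command line (a sketch; userName and password are placeholders, and the endpoint shown is the Linkis gateway login API):

curl -c cookies.txt -X POST "http://nginxIp:port/api/rest_j/v1/user/login" \
  -H "Content-Type: application/json" \
  -d '{"userName":"hadoop","password":"your_password"}'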

    5.2 Access the scriptis page after successful login#

#http://10.10.10.10:8080/scriptis/#/home
http://nginxIp:port/scriptis/#/home

nginxIp: the IP of the nginx server on which the Linkis console is deployed; port: the port nginx is configured to listen on; scriptis: the location path configured in nginx for requesting the static files of the scriptis project (can be customized)

5.3 Using scriptis#

    Take creating a new sql query task as an example.

    step1 Create a new script Select the script type as sql type

    Rendering

    step2 Enter the statement to be queried

    Rendering

    step3 run

    Rendering

step4 View Results

    Rendering

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/quick_deploy/index.html b/docs/1.1.3/deployment/quick_deploy/index.html index b8c78df223d..2411cef73f9 100644 --- a/docs/1.1.3/deployment/quick_deploy/index.html +++ b/docs/1.1.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
# set the connection information of the database
# including ip address, database's name, username and port
# Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide underlying storage of the JobHistory.
MYSQL_HOST=
MYSQL_PORT=
MYSQL_DB=
MYSQL_USER=
MYSQL_PASSWORD=
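For example (all values below are illustrative placeholders):

MYSQL_HOST=127.0.0.1
MYSQL_PORT=3306
MYSQL_DB=linkis
MYSQL_USER=linkis_user
MYSQL_PASSWORD=linkis_password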

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

A user might run the install.sh script repeatedly, which would clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

3. Check whether the installation was successful#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

Because the mysql-connector-java driver is licensed under GPL 2.0, which does not comply with the license policy of the Apache open source agreement, starting from version 1.0.3 the official Apache release package does not include the mysql-connector-java-x.x.x.jar dependency by default; you need to add it to the corresponding lib directories during installation and deployment.

    To download the mysql driver, take version 5.1.49 as an example: download link https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib package path

cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/

    5. Linkis quick startup#

    Notice that if you use DSS or other projects that rely on Linkis version < 1.1.1, you also need to modify the ${LINKIS_HOME}/conf/linkis.properties file:

    echo "wds.linkis.session.ticket.key=bdp-user-ticket-id" >> linkis.properties

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on the Eureka, here is the way to check:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, then the HTTP address is http://127.0.0.1:20303
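The registration list can also be checked from the command line through Eureka's standard REST API (a sketch, assuming the default address):

curl -H "Accept: application/json" http://127.0.0.1:20303/eureka/apps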

    As shown in the figure below, if all the following micro-services are registered in the Eureka, it means that they've started successfully and been able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html b/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html index 2c804907585..3736933a566 100644 --- a/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Source Code Directory Structure

Description of the hierarchical directory structure of the Linkis source code. If you want to learn more about the Linkis modules, please check the related Linkis architecture design.

|-- assembly-combined-package //Compile the module of the entire project
|        |-- assembly-combined
|        |-- bin
|        |-- deploy-config
|        |-- src
|-- linkis-commons //Core abstraction, which contains all common modules
|        |-- linkis-common //Common module, built-in many common tools
|        |-- linkis-hadoop-common
|        |-- linkis-httpclient //Java SDK top-level interface
|        |-- linkis-message-scheduler
|        |-- linkis-module
|        |-- linkis-mybatis //SpringCloud's Mybatis module
|        |-- linkis-protocol
|        |-- linkis-rpc //RPC module, complex two-way communication based on Feign
|        |-- linkis-scheduler //General scheduling module
|        |-- linkis-storage
|
|-- linkis-computation-governance //computing governance service
|        |-- linkis-client //Java SDK, users can directly access Linkis through Client
|        |-- linkis-computation-governance-common
|        |-- linkis-engineconn
|        |-- linkis-engineconn-manager
|        |-- linkis-entrance //General low-level entrance module
|        |-- linkis-entrance-client
|        |-- linkis-jdbc-driver
|        |-- linkis-manager
|
|-- linkis-engineconn-plugins
|        |-- engineconn-plugins
|        |-- linkis-engineconn-plugin-framework
|
|-- linkis-extensions
|        |-- linkis-io-file-client
|-- linkis-orchestrator
|        |-- linkis-code-orchestrator
|        |-- linkis-computation-orchestrator
|        |-- linkis-orchestrator-core
|        |-- plugin
|-- linkis-public-enhancements //Public enhancement services
|        |-- linkis-bml //Material library
|        |-- linkis-context-service //Unified context
|        |-- linkis-datasource //Data source service
|        |-- linkis-publicservice //Public Service
|-- linkis-spring-cloud-services //Microservice governance
|        |-- linkis-service-discovery
|        |-- linkis-service-gateway //Gateway
|-- db //Database information
|-- license-doc //license details
|        |-- license //The license of the background project
|        |-- ui-license //License of linkis management desk
|-- tool //Tool script
|        |-- check.sh
|        |-- dependencies
|
|-- web //Management desk code of linkis
|
|-- scalastyle-config.xml //Scala code format check configuration file
|-- CONTRIBUTING.md
|-- CONTRIBUTING_CN.md
|-- DISCLAIMER-WIP
|-- LICENSE //LICENSE of the project source code
|-- LICENSE-binary //LICENSE of binary package
|-- LICENSE-binary-ui //LICENSE of the front-end compiled package
|-- NOTICE //NOTICE of project source code
|-- NOTICE-binary //NOTICE of binary package
|-- NOTICE-binary-ui //NOTICE of front-end binary package
|-- licenses-binary //The detailed dependent license files of the binary package
|-- licenses-binary-ui //The detailed license files that the front-end compilation package depends on
|-- README.md
|-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/start_metadatasource/index.html b/docs/1.1.3/deployment/start_metadatasource/index.html index 70db0024694..e9a75b84acb 100644 --- a/docs/1.1.3/deployment/start_metadatasource/index.html +++ b/docs/1.1.3/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html b/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html index 32c9ecacf1b..a1d93fbb888 100644 --- a/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html +++ b/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ installation package directory structure | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/deployment/web_install/index.html b/docs/1.1.3/deployment/web_install/index.html index ba1a02cf208..07d280997d2 100644 --- a/docs/1.1.3/deployment/web_install/index.html +++ b/docs/1.1.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

2. Start the service: sudo systemctl restart nginx

3. After execution, you can access it directly in the Chrome browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
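For reference, a sketch of where these directives sit in the nginx configuration (the server and location blocks are illustrative, not taken from the actual linkis.conf):

http {
    client_max_body_size 200m;       # upload file size limit

    server {
        location / {
            proxy_read_timeout 600s; # backend interface timeout
        }
    }
}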
    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_compile_and_package/index.html b/docs/1.1.3/development/linkis_compile_and_package/index.html index 1f72bf53f90..0faadcf9841 100644 --- a/docs/1.1.3/development/linkis_compile_and_package/index.html +++ b/docs/1.1.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
    <version>${hadoop.version}</version>
</dependency>

Modify hadoop-hdfs to:

<dependency>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-hdfs-client</artifactId>
    <version>${hadoop.version}</version>
</dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
vim pom.xml

<properties>
    <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
</properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_config/index.html b/docs/1.1.3/development/linkis_config/index.html index fccb6b7d869..2fb6fe5ace6 100644 --- a/docs/1.1.3/development/linkis_config/index.html +++ b/docs/1.1.3/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_debug/index.html b/docs/1.1.3/development/linkis_debug/index.html index 666f67b9092..087aafbbbe3 100644 --- a/docs/1.1.3/development/linkis_debug/index.html +++ b/docs/1.1.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -49,7 +49,7 @@ y

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/linkis_debug_in_mac/index.html b/docs/1.1.3/development/linkis_debug_in_mac/index.html index a67cf8b1084..ed06e16f228 100644 --- a/docs/1.1.3/development/linkis_debug_in_mac/index.html +++ b/docs/1.1.3/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

These two configurations mainly specify the root directory for engine storage. Pointing it to target/out means that after engine-related code or configuration changes, the engineplugin service can simply be restarted to make them take effect.

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command to start the engine process. The current user on the mac generally needs to enter a password when using sudo. Therefore, it is necessary to set sudo password-free for the current user. The setting method is as follows:

sudo chmod u-w /etc/sudoers
sudo visudo
# Replace "#%admin ALL=(ALL) ALL" with "%admin ALL=(ALL) NOPASSWD: ALL"
# Save the file and exit

    3.13 Service Testing#

Make sure all of the above services have started successfully, then test by submitting a shell script job in Postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

Execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/development/new_engine_conn/index.html b/docs/1.1.3/development/new_engine_conn/index.html index 3f6164db116..9cdb62210a1 100644 --- a/docs/1.1.3/development/new_engine_conn/index.html +++ b/docs/1.1.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -52,7 +52,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

Also, when contributing to the community, please consider the license or copyright of the svg file.

    3. Chapter Summary#

The above content records the implementation process of a new engine, as well as some additional engine configuration that needs to be done. At present, the process of extending a new engine is still relatively cumbersome; we hope to optimize the extension and installation of new engines in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/1.1.3/development/swwager_instructions/index.html b/docs/1.1.3/development/swwager_instructions/index.html index 825953d4207..25d11e05874 100644 --- a/docs/1.1.3/development/swwager_instructions/index.html +++ b/docs/1.1.3/development/swwager_instructions/index.html @@ -7,7 +7,7 @@ Swwager Annotation Instructions | Apache Linkis - + @@ -21,7 +21,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/development/web_build/index.html b/docs/1.1.3/development/web_build/index.html index 0964a85a67c..7b2d7b20603 100644 --- a/docs/1.1.3/development/web_build/index.html +++ b/docs/1.1.3/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

Note: Because the front end and back end of the project are developed separately, when running in a local browser, the browser needs to allow cross-domain requests in order to access the back-end interface. For specific settings, please refer to solving the Chrome cross-domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, replace the npm install command by executing the following command

    cnpm install

    Note that when the project is started and packaged, you can still use the npm run build and npm run serve commands

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/flink/index.html b/docs/1.1.3/engine_usage/flink/index.html index 12e2924f589..be8e03b286a 100644 --- a/docs/1.1.3/engine_usage/flink/index.html +++ b/docs/1.1.3/engine_usage/flink/index.html @@ -7,16 +7,16 @@ Flink Engine Usage | Apache Linkis - +
- +

    Version: Next(1.1.3)

    Flink Engine Usage

    This article mainly introduces the configuration, deployment and use of the flink engine in Linkis1.0.

    1. Environment configuration before Flink engine use#

    If you want to use the Flink engine on your server, you need to ensure that the following environment variables have been set correctly and that the user who started the engine has these environment variables.

    It is strongly recommended that you check these environment variables of the executing user before executing flink tasks. The specific way is

sudo su - ${username}
echo ${JAVA_HOME}
echo ${FLINK_HOME}
Environment variable name | Environment variable content | Remarks
JAVA_HOME | JDK installation path | Required
HADOOP_HOME | Hadoop installation path | Required
HADOOP_CONF_DIR | Hadoop configuration path | Required; Linkis starts the Flink engine in Flink on YARN mode, so YARN support is needed
FLINK_HOME | Flink installation path | Required
FLINK_CONF_DIR | Flink configuration path | Required, such as ${FLINK_HOME}/conf
FLINK_LIB_DIR | Flink package path | Required, ${FLINK_HOME}/lib

    Table 1-1 Environmental configuration list

    2. Flink engine configuration and deployment#

    2.1 Flink version selection and compilation#

The Flink version supported by Linkis 1.0.2 and above is Flink 1.12.2. In theory Linkis 1.0.2+ can support other Flink versions, but the Flink API has changed considerably between versions, so you may need to modify the flink engine code in Linkis according to those API changes and recompile it.

    2.2 Flink engineConn deployment and loading#

    The Linkis Flink engine will not be installed in Linkis 1.0.2+ by default, and you need to compile and install it manually.

The way to compile flink separately:

cd ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/flink/
mvn clean install

    The installation method is to compile the engine package, the location is

${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/flink/target/flink-engineconn.zip

    Then deploy to

    ${LINKIS_HOME}/lib/linkis-engineplugins
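Concretely, the deployment can be done like this (a sketch; the paths follow the examples above):

cp ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/flink/target/flink-engineconn.zip ${LINKIS_HOME}/lib/linkis-engineplugins/
cd ${LINKIS_HOME}/lib/linkis-engineplugins
unzip flink-engineconn.zip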

    And restart linkis-engineplugin

cd ${LINKIS_HOME}/sbin
sh linkis-daemon.sh restart cg-engineplugin

    A more detailed introduction to engineplugin can be found in the following article. EngineConnPlugin Installation

    2.3 Flink engine tags#

Linkis 1.0 manages engines through labels, so label data needs to be inserted into the database. The way to insert it is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation operation, queue setting#

    The Flink engine of Linkis 1.0 is started by flink on yarn, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Prepare knowledge, two ways to use Flink engine#

Linkis' Flink engine has two execution methods. One is the ComputationEngineConn method, which is mainly used in DSS-Scriptis or Streamis-Datasource for debugging sampling and verifying the correctness of the flink code; the other is the OnceEngineConn method, which is mainly used to start a streaming application in the Streamis production center.

    Prepare knowledge, Connector plug-in of FlinkSQL#

FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the plug-in jar packages of these connectors into the lib of the flink engine and restart the Linkis EnginePlugin service. For example, if you want to use binlog as a data source in your FlinkSQL, you need to put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine.
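For example (a sketch; the exact dist path under the flink engine directory may differ in your installation):

cp flink-connector-mysql-cdc-1.1.1.jar ${LINKIS_HOME}/lib/linkis-engineconn-plugins/flink/dist/1.12.2/lib/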

cd ${LINKIS_HOME}/sbin
sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    FlinkSQL writing example, taking binlog as an example

CREATE TABLE mysql_binlog (
 id INT NOT NULL,
 name STRING,
 age INT
) WITH (
 'connector' = 'mysql-cdc',
 'hostname' = 'ip',
 'port' = 'port',
 'username' = 'username',
 'password' = 'password',
 'database-name' = 'dbname',
 'table-name' = 'tablename',
 'debezium.snapshot.locking.mode' = 'none' -- It is recommended to add this, otherwise the table will be locked
);
select * from mysql_binlog where id > 10;

When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the open-result-set interface to display it.

    3.2 Task submission via Linkis-cli#

After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Flink is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

The OnceEngineConn method is used to formally start Flink streaming applications. It calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. Other systems, such as Streamis, can make this call. Using the Client is also very simple: first create a new maven project, or introduce the following dependency into your project

<dependency>
    <groupId>com.webank.wedatasphere.linkis</groupId>
    <artifactId>linkis-computation-client</artifactId>
    <version>${linkis.version}</version>
</dependency>

Then create a new scala test file and click Execute to complete the example of analyzing binlog data and inserting it into another mysql database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and api version of linkis, such as

wds.linkis.server.version=v1
wds.linkis.gateway.url=http://ip:9001/
object OnceJobTest {
  def main(args: Array[String]): Unit = {
    val sql = """CREATE TABLE mysql_binlog (
                | id INT NOT NULL,
                | name STRING,
                | age INT
                |) WITH (
                | 'connector' = 'mysql-cdc',
                | 'hostname' = 'ip',
                | 'port' = 'port',
                | 'username' = '${username}',
                | 'password' = '${password}',
                | 'database-name' = '${database}',
                | 'table-name' = '${tablename}',
                | 'debezium.snapshot.locking.mode' = 'none'
                |);
                |CREATE TABLE sink_table (
                | id INT NOT NULL,
                | name STRING,
                | age INT,
                | primary key(id) not enforced
                |) WITH (
                |  'connector' = 'jdbc',
                |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                |  'table-name' = '${tablename}',
                |  'driver' = 'com.mysql.jdbc.Driver',
                |  'username' = '${username}',
                |  'password' = '${password}'
                |);
                |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                |""".stripMargin
    val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
      .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
      .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
      .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
      .addStartupParam(Configuration.IS_TEST_MODE.key, true)
      //    .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
      .setMaxSubmitTime(300000)
      .addExecuteUser("hadoop")
      .addJobContent("runType", "sql")
      .addJobContent("code", sql)
      .addSource("jobName", "OnceJobTest")
      .build()
    onceJob.submit()
    println(onceJob.getId)
    onceJob.waitForCompleted()
    System.exit(0)
  }
}
    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/hive/index.html b/docs/1.1.3/engine_usage/hive/index.html index 8e39adf5b80..ce0432994b8 100644 --- a/docs/1.1.3/engine_usage/hive/index.html +++ b/docs/1.1.3/engine_usage/hive/index.html @@ -7,12 +7,12 @@ Hive Engine Usage | Apache Linkis - +
- +

    Version: Next(1.1.3)

    Hive Engine Usage

    This article mainly introduces the configuration, deployment and use of Hive engineConn in Linkis1.0.

    1. Environment configuration before Hive engineConn use#

    If you want to use the hive engineConn on your linkis server, you need to ensure that the following environment variables have been set correctly and that the user who started the engineConn has these environment variables.

    It is strongly recommended that you check these environment variables of the executing user before executing hive tasks.

Environment variable name | Environment variable content | Remarks
JAVA_HOME | JDK installation path | Required
HADOOP_HOME | Hadoop installation path | Required
HADOOP_CONF_DIR | Hadoop configuration path | Required
HIVE_CONF_DIR | Hive configuration path | Required

    Table 1-1 Environmental configuration list

    2. Hive engineConn configuration and deployment#

    2.1 Hive version selection and compilation#

Hive versions 1.x/2.x/3.x are supported, and the version supported by default is 2.3.3. If you want to modify the hive version, for example to 2.3.3, you can find the linkis-engineConnplugin-hive module, change the <hive.version> tag to 2.3.3, and then compile this module separately. The default is to support hive on MapReduce; if you want to change to Hive on Tez, you need to copy all the jars prefixed with tez-* to the directory ${LINKIS_HOME}/lib/linkis-engineconn-plugins/hive/dist/version/lib. Other hive operating modes are similar: just copy the corresponding dependencies to the lib directory of the Hive EngineConn.
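For example (a sketch; ${TEZ_HOME} and the exact version directory are illustrative assumptions):

cp ${TEZ_HOME}/tez-*.jar ${LINKIS_HOME}/lib/linkis-engineconn-plugins/hive/dist/version/lib/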

2.2 hive engineConn deployment and loading#

If your hive engineConn plug-in has already been compiled, you need to put the new plug-in in the specified location for it to be loaded; you can refer to the following article for details

    EngineConnPlugin Installation

    2.3 Linkis adds Hive console parameters(optional)#

    Linkis can configure the corresponding EngineConn parameters on the management console. If your newly added EngineConn needs this feature, you can refer to the following documents:

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. Use of hive engineConn#

    Preparation for operation, queue setting#

    Hive's MapReduce task requires yarn resources, so you need to set up the queue at the beginning

    Figure 3-1 Queue settings

    You can also add the queue value in the StartUpMap of the submission parameter: startupMap.put("wds.linkis.rm.yarnqueue", "dws")

    3.1 How to use Linkis SDK#

Linkis provides a client method to call hive tasks, via the SDK provided by LinkisClient. We provide two ways to call it, java and scala; for the specific usage, refer to the JAVA SDK Manual. If you use Hive, you only need to make the following changes:

Map<String, Object> labels = new HashMap<String, Object>();
labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "hive-2.3.3"); // required engineType Label
labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
labels.put(LabelKeyConstant.CODE_TYPE_KEY, "hql"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of Hive is as follows:

sh ./bin/linkis-cli -engineType hive-2.3.3 -codeType hql -code "show tables" -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new hive script and write hivesql code.

The hive engineConn is implemented by instantiating hive's driver, which then submits the task, obtains the result set, and displays it.

    Figure 3-2 Screenshot of the execution effect of hql

    4. Hive engineConn user settings#

    In addition to the above engineConn configuration, users can also make custom settings, including the memory size of the hive Driver process, etc.

    Figure 4-1 User-defined configuration management console of hive

    5.Hive modification log display#

    The default log interface does not display the application_id and the number of tasks completed, the user can output the log as needed @@ -26,7 +26,7 @@ </loggers></configuration>

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/jdbc/index.html b/docs/1.1.3/engine_usage/jdbc/index.html index ad0a92b7bf9..e8548c94217 100644 --- a/docs/1.1.3/engine_usage/jdbc/index.html +++ b/docs/1.1.3/engine_usage/jdbc/index.html @@ -7,16 +7,16 @@ JDBC Engine Usage | Apache Linkis - +
- +

    Version: Next(1.1.3)

    JDBC Engine Usage

    This article mainly introduces the configuration, deployment and use of JDBC EngineConn in Linkis1.0.

    1. Environment configuration before using the JDBC EngineConn#

    If you want to use the JDBC EngineConn on your server, you need to prepare the JDBC connection information, such as the connection address, user name and password of the MySQL database, etc.

    2. JDBC EngineConn configuration and deployment#

    2.1 JDBC version selection and compilation#

    The JDBC EngineConn does not need to be compiled by the user, and the compiled JDBC EngineConn plug-in package can be used directly. Drivers that have been provided include MySQL, PostgreSQL, etc.

    2.2 JDBC EngineConn deployment and loading#

    The default loading method can be used here; installing the standard release is sufficient.

    2.3 JDBC EngineConn Labels#

    The tag data inserted by the default dml.sql is sufficient for normal use.

    3. The use of JDBC EngineConn#

    Preparation for operation#

    You need to configure the JDBC connection information, including the connection address, user name and password.

    Figure 3-1 JDBC configuration information

    You can also specify them in the RuntimeMap of the submitted task:

    wds.linkis.jdbc.connect.url
    wds.linkis.jdbc.username
    wds.linkis.jdbc.password
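    For illustration, a minimal sketch of building such a RuntimeMap on the client side (the keys come from the list above; the URL, user name and password values are placeholders, and the surrounding submission code is omitted):

    import java.util.HashMap;
    import java.util.Map;

    public class JdbcRuntimeParams {
        public static Map<String, Object> runtimeMap() {
            Map<String, Object> runtimeMap = new HashMap<>();
            // placeholder values; use your real database connection info
            runtimeMap.put("wds.linkis.jdbc.connect.url", "jdbc:mysql://127.0.0.1:3306/test");
            runtimeMap.put("wds.linkis.jdbc.username", "db_user");
            runtimeMap.put("wds.linkis.jdbc.password", "db_pass");
            return runtimeMap; // pass this as the task's RuntimeMap when submitting
        }
    }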

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call jdbc tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. To use JDBC, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. JDBC usage is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    Scriptis is the simplest way: enter Scriptis, right-click a directory, create a new JDBC script, write the SQL, and click Execute.

    The JDBC EngineConn works by loading the JDBC Driver, submitting the sql to the SQL server for execution, and fetching and returning the result set.
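    A minimal sketch of that principle using the standard java.sql API (a toy example, not the EngineConn's actual code; the connection values are placeholders):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class JdbcSketch {
        public static void main(String[] args) throws Exception {
            // connection info corresponds to wds.linkis.jdbc.connect.url / username / password
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://127.0.0.1:3306/test", "db_user", "db_pass");
                 Statement stmt = conn.createStatement();
                 ResultSet rs = stmt.executeQuery("show tables")) {
                while (rs.next()) {
                    System.out.println(rs.getString(1)); // each row of the result set
                }
            }
        }
    }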

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information; it is recommended to encrypt and manage the password and other sensitive information.
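    As one possible approach to that recommendation, a minimal sketch of encrypting a password with AES-GCM via the JDK's javax.crypto API before persisting it (illustrative only; key generation and management are out of scope here):

    import java.nio.charset.StandardCharsets;
    import java.security.SecureRandom;
    import java.util.Base64;
    import javax.crypto.Cipher;
    import javax.crypto.spec.GCMParameterSpec;
    import javax.crypto.spec.SecretKeySpec;

    public class PasswordVault {
        // Encrypt a JDBC password with AES-GCM before storing it in configuration.
        public static String encrypt(byte[] key16, String password) throws Exception {
            byte[] iv = new byte[12];
            new SecureRandom().nextBytes(iv);
            Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding");
            cipher.init(Cipher.ENCRYPT_MODE, new SecretKeySpec(key16, "AES"),
                    new GCMParameterSpec(128, iv));
            byte[] ct = cipher.doFinal(password.getBytes(StandardCharsets.UTF_8));
            // store the iv together with the ciphertext so it can be decrypted later
            byte[] out = new byte[iv.length + ct.length];
            System.arraycopy(iv, 0, out, 0, iv.length);
            System.arraycopy(ct, 0, out, iv.length, ct.length);
            return Base64.getEncoder().encodeToString(out);
        }
    }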

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/openlookeng/index.html b/docs/1.1.3/engine_usage/openlookeng/index.html index de096b9f8d8..53ce0e81df7 100644 --- a/docs/1.1.3/engine_usage/openlookeng/index.html +++ b/docs/1.1.3/engine_usage/openlookeng/index.html @@ -7,19 +7,19 @@ OpenLookEng Engine | Apache Linkis - +
    -
    Version: Next(1.1.3)

    OpenLookEng Engine

    This article mainly introduces the configuration, deployment and use of the openlookeng engine (supported since Linkis 1.1.1).

    1 Environmental Requirements#

    If you want to deploy the openlookeng engine, you need to prepare an available openlookeng environment.

    2 Configuration and Deployment#

    2.1 version selection and compilation#

    For the openlookeng engine, the client currently uses io.hetu.core:presto-client:1.5.0 by default.

    This engine plug-in is not included in the released installation deployment package by default. +

    Version: Next(1.1.3)

    OpenLookEng Engine

    This article mainly introduces the configuration, deployment and use of the openlookeng engine (supported since Linkis 1.1.1).

    1 Environmental Requirements#

    If you want to deploy the openlookeng engine, you need to prepare an available openlookeng environment.

    2 Configuration and Deployment#

    2.1 version selection and compilation#

    For the openlookeng engine, the client currently uses io.hetu.core:presto-client:1.5.0 by default.

    This engine plug-in is not included in the released installation deployment package by default. You can follow this guide to deploy and install https://linkis.apache.org/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin , or follow the process below to manually compile and deploy

    Compile openlookeng separately

    cd ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/openlookeng/
    mvn clean install

    2.2 Deployment and loading of materials#

    The engine package compiled in step 2.1 is located in

    ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/openlookeng/target/out/openlookeng

    Upload to the engine directory of the server

    ${LINKIS_HOME}/lib/linkis-engineplugins

    And restart linkis-engineplugin (or refresh through the engine interface)

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    2.3 Engine tags#

    Linkis 1.X manages engines through tags, so the tag data needs to be inserted into the database. The insertion method is shown below.

    EngineConnPlugin engine plugin installation

    3 The use of the engine#

    Prepare for operation#

    If the default parameters do not meet your needs, you can configure basic parameters through the parameter configuration page of the management console, such as the openlookeng service connection information; the default address is http://127.0.0.1:8080.

    Figure 3-1 openlookeng configuration information

    You can also configure it through the parameter params.configuration.runtime in the submit task interface

    Example of http request parameters:
    {
        "executionContent": {"code": "show databases;", "runType": "sql"},
        "params": {
            "variable": {},
            "configuration": {
                "runtime": {
                    "linkis.openlookeng.url": "http://127.0.0.1:9090"
                }
            }
        },
        "source": {"scriptPath": "file:///mnt/bdp/hadoop/1.sql"},
        "labels": {
            "engineType": "openlookeng-1.5.0",
            "userCreator": "hadoop-IDE"
        }
    }

    3.1 Using Linkis SDK#

    Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, please refer to JAVA SDK Manual. For the openlookeng task, you only need to modify the EngineConnType and CodeType parameters in the Demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "openlookeng-1.5.0"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 Task submission via Linkis-cli#

    Since Linkis 1.0, a cli method is provided to submit tasks. You only need to specify the corresponding EngineConn and CodeType tag types. openlookeng usage is as follows:

    sh ./bin/linkis-cli -engineType openlookeng-1.5.0 -codeType sql -code 'show databases;' -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/overview/index.html b/docs/1.1.3/engine_usage/overview/index.html index bfd02fd6e15..f80a9774e33 100644 --- a/docs/1.1.3/engine_usage/overview/index.html +++ b/docs/1.1.3/engine_usage/overview/index.html @@ -7,16 +7,16 @@ Overview | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Overview

    1 Overview#

        As a powerful computing middleware, Linkis can easily interface with different computing engines. By shielding the usage details of each engine, it provides a unified use interface, which greatly reduces the operation and maintenance cost of deploying and running a big data platform on Linkis. At present, Linkis has integrated several mainstream computing engines that basically cover the data requirements in production. To provide good scalability, Linkis also offers interfaces for accessing new computing engines.
    +

    Version: Next(1.1.3)

    Overview

    1 Overview#

        As a powerful computing middleware, Linkis can easily interface with different computing engines. By shielding the usage details of each engine, it provides a unified use interface, which greatly reduces the operation and maintenance cost of deploying and running a big data platform on Linkis. At present, Linkis has integrated several mainstream computing engines that basically cover the data requirements in production. To provide good scalability, Linkis also offers interfaces for accessing new computing engines.
        An engine is a component that provides users with data processing and analysis capabilities. The engines currently connected to Linkis include mainstream big data computing engines such as Spark, Hive and Presto, as well as script engines such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis; users can conveniently use the engines supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Supports Scriptis | Supports Workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support
    Flink | Support | Support

    2. Document structure#

    For the engines that have already been integrated, refer to the following documents.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/pipeline/index.html b/docs/1.1.3/engine_usage/pipeline/index.html index 84652179e87..ddc11efef6b 100644 --- a/docs/1.1.3/engine_usage/pipeline/index.html +++ b/docs/1.1.3/engine_usage/pipeline/index.html @@ -6,13 +6,13 @@ -pipeline engine | Apache Linkis - +Pipeline Engine | Apache Linkis +
    -
    Version: Next(1.1.3)

    pipeline engine

    This article mainly introduces the configuration, deployment and use of the pipeline engine (supported since Linkis 1.1.0).

    1 Configuration and deployment#

    1.1 Version selection and compilation#

    Note: before compiling the pipeline engine, you need to compile the full linkis project first. +

    Version: Next(1.1.3)

    Pipeline Engine

    This article mainly introduces the configuration, deployment and use of the pipeline engine (supported since Linkis 1.1.0).

    1 Configuration and deployment#

    1.1 Version selection and compilation#

    Note: before compiling the pipeline engine, you need to compile the full linkis project first. Currently, the pipeline engine needs to be installed and deployed manually.

    This engine plug-in is not included in the published installation and deployment package by default. You can follow this guide to deploy and install it: https://linkis.apache.org/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin , or manually compile and deploy it according to the following process.

    Compile pipeline separately:

    cd ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/pipeline/
    mvn clean install

    1.2 Material deployment and loading#

    The engine package compiled in step 1.1 is located in

    ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/pipeline/target/out/pipeline

    Upload to the engine directory of the server

    ${LINKIS_HOME}/lib/linkis-engineplugins

    And restart the linkis engineplugin to refresh the engine

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    Or refresh through the engine interface: after the engine is placed in the corresponding directory, send a refresh request to the linkis cg-engineconnplugin service through the HTTP interface.

    • Interface: http://${engineconn-plugin-server-IP}:${port}/api/rest_j/v1/rpc/receiveAndReply

    • Request method: POST

    {  "method": "/enginePlugin/engineConn/refreshAll"}

    Check whether the engine refresh succeeded: if you encounter problems during the refresh and need to confirm whether it was successful, check whether the last_update_time of the linkis_cg_engine_conn_plugin_bml_resources table is the time when the refresh was triggered.

    # Log in to the linkis database
    select * from linkis_cg_engine_conn_plugin_bml_resources

    1.3 Engine label#

    Linkis 1.X manages engines through labels, so the label data needs to be inserted into the database. The insertion method is shown below.

    EngineConnPlugin Engine plug-in installation

    2 Use of engine#

    2.1 Task submission via linkis cli#

    Linkis 1.0 provides a cli to submit tasks. You only need to specify the corresponding EngineConn and CodeType tag types. Pipeline usage is as follows:

    • Note that the engine version in engineType pipeline-1 is used as a prefix: if the pipeline version is V1, set it to pipeline-1
    sh bin/linkis-cli -submitUser  hadoop  -engineType pipeline-1  -codeType pipeline  -code "from hdfs:///000/000/000/A.dolphin  to file:///000/000/000/B.csv"

    The statement from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv is explained in section 2.3 below.

    For specific use, please refer to: Linkis CLI Manual.

    Because the pipeline engine is mainly used to import and export files, we use copying file A to file B as the introductory example.

    2.2 New script#

    Right-click in the workspace module and create a new script of type storage.

    2.3 Script#

    The syntax is: from path to path#

    The file copy rules are: files with the dolphin suffix are result-set files and can be converted to .csv and .xlsx files; other types can only be copied from address A to address B (plain transfer).

    # dolphin type
    from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.csv
    from hdfs:///000/000/000/A.dolphin to file:///000/000/000/B.xlsx
    @@ -20,7 +20,7 @@
     

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/python/index.html b/docs/1.1.3/engine_usage/python/index.html index 0c91763fa88..09e2fc1dd95 100644 --- a/docs/1.1.3/engine_usage/python/index.html +++ b/docs/1.1.3/engine_usage/python/index.html @@ -7,18 +7,21 @@ Python Engine Usage | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Python Engine Usage

    This article mainly introduces the configuration, deployment and use of the Python EngineConn in Linkis1.0.

    1. Environment configuration before using Python EngineConn#

    If you want to use the python EngineConn on your server, you need to ensure that the python execution directory and execution permissions are in the user's PATH.

    Environment variable name | Environment variable content | Remarks
    python | python execution environment | Anaconda's python executor is recommended

    Table 1-1 Environmental configuration list

    2. Python EngineConn configuration and deployment#

    2.1 Python version selection and compilation#

    Python supports both python2 and python3. You can switch the Python version by simply changing the configuration, without recompiling the python EngineConn.

    2.2 python engineConn deployment and loading#

    The default loading method can be used here without extra setup.

    2.3 tags of python EngineConn#

    The tag data inserted by the default dml.sql is sufficient for normal use.

    3. Use of Python EngineConn#

    Preparation for operation#

    Before submitting python tasks on linkis, you only need to make sure the python executable is on your user's PATH.

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call python tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. +

    Version: Next(1.1.3)

    Python Engine Usage

    This article mainly introduces the configuration, deployment and use of the Python EngineConn in Linkis1.0.

    1. Environment configuration before using Python EngineConn#

    If you want to use the python EngineConn on your server, you need to ensure that the python execution directory and execution permissions are in the user's PATH.

    Environment variable name | Environment variable content | Remarks
    python | python execution environment | Anaconda's python executor is recommended

    Table 1-1 Environmental configuration list

    2. Python EngineConn configuration and deployment#

    2.1 Python version selection and compilation#

    Python supports both python2 and python3. You can switch the Python version by simply changing the configuration, without recompiling the python EngineConn.

    # 1: Submit a task via the cli and switch the version by setting python.version=python3 at the end of the command (python3: the name of the soft-link file, which can be customized)
    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop -confMap python.version=python3
    # 2: Submit a task via the cli and switch the version by setting the version path python.version=/usr/bin/python (/usr/bin/python: the path of the soft-link file)
    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop -confMap python.version=/usr/bin/python

    Page configuration: the python version can also be switched on the parameter configuration page of the management console (see the python-configure screenshot).

    2.2 python engineConn deployment and loading#

    The default loading method can be used here without extra setup.

    2.3 tags of python EngineConn#

    The tag data inserted by the default dml.sql is sufficient for normal use.

    3. Use of Python EngineConn#

    Preparation for operation#

    Before submitting python tasks on linkis, you only need to make sure the python executable is on your user's PATH.

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call python tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. To use Python, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. Python usage is as follows:

    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")"  -submitUser hadoop -proxyUser hadoop

    For specific usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new python script, write python code and click Execute.

    The execution logic of python is to start a python executor through a Py4j gateway; the Python EngineConn then submits the code to the python executor for execution.
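    A minimal sketch of the Py4j side of that design (illustrative, not Linkis's actual gateway code; py4j must be on the classpath, and the python process is started separately and connects back to this gateway):

    import py4j.GatewayServer;

    public class PythonGatewaySketch {
        // The entry point whose methods the python side can call back into.
        public String getCode() {
            return "print('hello from the JVM side')";
        }

        public static void main(String[] args) {
            // Start a Py4j gateway; a separately launched python process
            // connects to it and exchanges code/results with the JVM.
            GatewayServer server = new GatewayServer(new PythonGatewaySketch());
            server.start();
        }
    }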

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/shell/index.html b/docs/1.1.3/engine_usage/shell/index.html index 7c16f239951..6b8a6b073aa 100644 --- a/docs/1.1.3/engine_usage/shell/index.html +++ b/docs/1.1.3/engine_usage/shell/index.html @@ -7,16 +7,16 @@ Shell Engine Usage | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Shell Engine Usage document

    This article mainly introduces the configuration, deployment and use of Shell EngineConn in Linkis1.0

    1. The environment configuration before using the Shell EngineConn#

    If you want to use the shell EngineConn on your server, you need to ensure that the user's PATH has the bash execution directory and execution permissions.

    Environment variable name | Environment variable content | Remarks
    sh execution environment | bash environment variables | bash is recommended

    Table 1-1 Environmental configuration list

    2. Shell EngineConn configuration and deployment#

    2.1 Shell version selection and compilation#

    The shell EngineConn does not need to be compiled by the user, and the compiled shell EngineConn plug-in package can be used directly.

    2.2 shell engineConn deployment and loading#

    The default loading method can be used here without extra setup.

    2.3 Labels of the shell EngineConn#

    The tag data inserted by the default dml.sql is sufficient for normal use.

    3. Use of Shell EngineConn#

    Preparation for operation#

    Before submitting shell tasks on linkis, you only need to ensure that the shell executable is on your user's $PATH.

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call shell tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. +

    Version: Next(1.1.3)

    Shell Engine Usage document

    This article mainly introduces the configuration, deployment and use of Shell EngineConn in Linkis1.0

    1. The environment configuration before using the Shell EngineConn#

    If you want to use the shell EngineConn on your server, you need to ensure that the user's PATH has the bash execution directory and execution permissions.

    Environment variable name | Environment variable content | Remarks
    sh execution environment | bash environment variables | bash is recommended

    Table 1-1 Environmental configuration list

    2. Shell EngineConn configuration and deployment#

    2.1 Shell version selection and compilation#

    The shell EngineConn does not need to be compiled by the user, and the compiled shell EngineConn plug-in package can be used directly.

    2.2 shell engineConn deployment and loading#

    The default loading method can be used here without extra setup.

    2.3 Labels of the shell EngineConn#

    The tag data inserted by the default dml.sql is sufficient for normal use.

    3. Use of Shell EngineConn#

    Preparation for operation#

    Before submitting shell tasks on linkis, you only need to ensure that the shell executable is on your user's $PATH.

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call shell tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. To use shell, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. shell usage is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    For specific usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell is that the shell EngineConn starts a system process through Java's built-in ProcessBuilder, redirects the output of the process back to the EngineConn, and writes it to the log.
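    A minimal sketch of that principle with ProcessBuilder (a toy example, not the EngineConn's actual code):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.nio.charset.StandardCharsets;

    public class ShellExecSketch {
        public static void main(String[] args) throws Exception {
            ProcessBuilder pb = new ProcessBuilder("bash", "-c", "echo hello");
            pb.redirectErrorStream(true); // merge stderr into stdout, as one log stream
            Process process = pb.start();
            try (BufferedReader reader = new BufferedReader(
                    new InputStreamReader(process.getInputStream(), StandardCharsets.UTF_8))) {
                String line;
                while ((line = reader.readLine()) != null) {
                    System.out.println(line); // the EngineConn would append this to its log
                }
            }
            System.out.println("exit code: " + process.waitFor());
        }
    }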

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/spark/index.html b/docs/1.1.3/engine_usage/spark/index.html index aa219c5f38a..0e6afa19d8a 100644 --- a/docs/1.1.3/engine_usage/spark/index.html +++ b/docs/1.1.3/engine_usage/spark/index.html @@ -7,18 +7,19 @@ Spark Engine Usage | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Spark Engine Usage

    This article mainly introduces the configuration, deployment and use of spark EngineConn in Linkis1.0.

    1. Environment configuration before using Spark EngineConn#

    If you want to use the spark EngineConn on your server, you need to ensure that the following environment variables have been set correctly and that the user who started the EngineConn has these environment variables.

    It is strongly recommended that you check these environment variables of the executing user before executing spark tasks.

    Environment variable name | Environment variable content | Remarks
    JAVA_HOME | JDK installation path | Required
    HADOOP_HOME | Hadoop installation path | Required
    HADOOP_CONF_DIR | Hadoop configuration path | Required
    HIVE_CONF_DIR | Hive configuration path | Required
    SPARK_HOME | Spark installation path | Required
    SPARK_CONF_DIR | Spark configuration path | Required
    python | python | Anaconda's python is recommended as the default python

    Table 1-1 Environmental configuration list

    2. Configuration and deployment of Spark EngineConn#

    2.1 Selection and compilation of spark version#

    In theory, Linkis 1.0 supports all versions of spark 2.x and above; Spark 2.4.3 is the default supported version. If you want to use another spark version, such as spark 2.1.0, you only need to modify the spark version of the plug-in and recompile it: find the linkis-engineplugin-spark module, change the \<spark.version> tag to 2.1.0, and then compile this module separately.

    2.2 spark engineConn deployment and loading#

    Once your spark EngineConn plug-in has been compiled, you need to put the new plug-in in the specified location for it to be loaded; refer to the following article for details.

    EngineConnPlugin Installation

    2.3 tags of spark EngineConn#

    Linkis 1.0 manages engines through tags, so the tag data needs to be inserted into the database. The insertion method is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. Use of spark EngineConn#

    Preparation for operation, queue setting#

    Because spark execution requires queue resources, users must configure a queue they are allowed to use before executing tasks.

    Figure 3-1 Queue settings

    You can also add the queue value in the StartUpMap of the submission parameter: startupMap.put("wds.linkis.rm.yarnqueue", "dws")

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call Spark tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. To use Spark, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. Spark usage is as follows:

    ## codeType correspondence: py-->pyspark  sql-->sparkSQL  scala-->Spark scala
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables"  -submitUser hadoop -proxyUser hadoop

    For specific usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis and create a new sql, scala or pyspark script for execution.

    The sql method is the simplest. You can create a new sql script and write and execute it. When it is executed, the progress will be displayed. If the user does not have a spark EngineConn at the beginning, the execution of sql will start a spark session (it may take some time here), +

    Version: Next(1.1.3)

    Spark Engine Usage

    This article mainly introduces the configuration, deployment and use of spark EngineConn in Linkis1.0.

    1. Environment configuration before using Spark EngineConn#

    If you want to use the spark EngineConn on your server, you need to ensure that the following environment variables have been set correctly and that the user who started the EngineConn has these environment variables.

    It is strongly recommended that you check these environment variables of the executing user before executing spark tasks.

    Environment variable name | Environment variable content | Remarks
    JAVA_HOME | JDK installation path | Required
    HADOOP_HOME | Hadoop installation path | Required
    HADOOP_CONF_DIR | Hadoop configuration path | Required
    HIVE_CONF_DIR | Hive configuration path | Required
    SPARK_HOME | Spark installation path | Required
    SPARK_CONF_DIR | Spark configuration path | Required
    python | python | Anaconda's python is recommended as the default python

    Table 1-1 Environmental configuration list

    2. Configuration and deployment of Spark EngineConn#

    2.1 Selection and compilation of spark version#

    In theory, Linkis 1.0 supports all versions of spark 2.x and above; Spark 2.4.3 is the default supported version. If you want to use another spark version, such as spark 2.1.0, you only need to modify the spark version of the plug-in and recompile it: find the linkis-engineplugin-spark module, change the \<spark.version> tag to 2.1.0, and then compile this module separately.

    2.2 spark engineConn deployment and loading#

    Once your spark EngineConn plug-in has been compiled, you need to put the new plug-in in the specified location for it to be loaded; refer to the following article for details.

    EngineConnPlugin Installation

    2.3 tags of spark EngineConn#

    Linkis 1.0 manages engines through tags, so the tag data needs to be inserted into the database. The insertion method is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. Use of spark EngineConn#

    Preparation for operation, queue setting#

    Because spark execution requires queue resources, users must configure a queue they are allowed to use before executing tasks.

    Figure 3-1 Queue settings

    You can also add the queue value in the StartUpMap of the submission parameter: startupMap.put("wds.linkis.rm.yarnqueue", "dws")
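    For context, an illustrative fragment showing that call in place (only a sketch; the rest of the submission code is omitted):

    import java.util.HashMap;
    import java.util.Map;

    public class SparkStartupParams {
        public static Map<String, Object> startupMap() {
            Map<String, Object> startupMap = new HashMap<>();
            // queue the spark EngineConn will request YARN resources from
            startupMap.put("wds.linkis.rm.yarnqueue", "dws");
            return startupMap;
        }
    }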

    3.1 How to use Linkis SDK#

    Linkis provides a client method to call Spark tasks through the SDK provided by LinkisClient. Both java and scala calls are supported; for specific usage, refer to the JAVA SDK Manual. To use Spark, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 How to use Linkis-cli#

    Since Linkis 1.0, you can submit tasks through the cli. You only need to specify the corresponding EngineConn and CodeType tag types. Spark usage is as follows:

    ## codeType correspondence: py-->pyspark  sql-->sparkSQL  scala-->Spark scala
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop
    # You can specify the yarn queue in the submission parameters via -confMap wds.linkis.yarnqueue=dws
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, refer to the Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis and create a new sql, scala or pyspark script for execution.

    The sql method is the simplest. You can create a new sql script, then write and execute it; the progress is displayed during execution. If the user does not yet have a spark EngineConn, executing sql will start a spark session (this may take some time). After the SparkSession is initialized, the sql starts to execute.

    Figure 3-2 Screenshot of the execution effect of sparksql

    For spark-scala tasks, we have initialized sqlContext and other variables, and users can directly use this sqlContext to execute sql.

    Figure 3-3 Execution effect diagram of spark-scala

    Similarly, in the way of pyspark, we have also initialized the SparkSession, and users can directly use spark.sql to execute SQL.

    Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/1.1.3/engine_usage/sqoop/index.html b/docs/1.1.3/engine_usage/sqoop/index.html index 45d2d33f51e..5309016095a 100644 --- a/docs/1.1.3/engine_usage/sqoop/index.html +++ b/docs/1.1.3/engine_usage/sqoop/index.html @@ -7,12 +7,12 @@ Sqoop Engine | Apache Linkis - +
    -
    Version: Next(1.1.3)

    Sqoop Engine usage documentation

    This article mainly introduces the configuration, deployment and use of the Sqoop engine in Linkis1.X.

    1.Sqoop engine Linkis system parameter configuration#

    The Sqoop engine mainly depends on the Hadoop basic environment. If the node needs to deploy the Sqoop engine, the Hadoop client environment needs to be deployed.

    It is strongly recommended that you use the native Sqoop to execute the test task on the node before executing the Sqoop task to check whether the node environment is normal.

    Environment Variable Name | Environment Variable Content | Remark
    JAVA_HOME | JDK installation path | Required
    HADOOP_HOME | Hadoop installation path | Required
    HADOOP_CONF_DIR | Hadoop configuration path | Required
    SQOOP_HOME | Sqoop installation path | Not Required
    SQOOP_CONF_DIR | Sqoop config path | Not Required
    HCAT_HOME | HCAT config path | Not Required
    HBASE_HOME | HBASE config path | Not Required

    Table 1-1 Environment configuration list

    Linkis Parameter Name | Parameter Content | Remark
    wds.linkis.hadoop.site.xml | Sets the location of the hadoop parameter files loaded by sqoop | Required. Reference example: "/etc/hadoop/conf/core-site.xml;/etc/hadoop/conf/hdfs-site.xml;/etc/hadoop/conf/yarn-site.xml;/etc/hadoop/conf/mapred-site.xml"
    sqoop.fetch.status.interval | Sets the interval for fetching the sqoop execution status | Not required, the default value is 5s

    2.Sqoop Engine configuration and deployment#

    2.1 Sqoop Version selection and compilation#

    Linkis 1.1.2 and above support the mainstream Sqoop versions 1.4.6 and 1.4.7; other versions may require modifying some code and recompiling.

    2.2 Sqoop engineConn deploy and load#

    Note: Before compiling the sqoop engine, the linkis project needs to be fully compiled

    Compile sqoop separately:
    cd ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/sqoop/
    mvn clean install

    To install, take the compiled engine package, located at

    ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/sqoop/target/sqoop-engineconn.zip

    and then deploy to

    ${LINKIS_HOME}/lib/linkis-engineplugins

    and restart linkis-engineplugin

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    More engineplugin details can be found in the following article.
    +

    Version: Next(1.1.3)

    Sqoop Engine usage documentation

    This article mainly introduces the configuration, deployment and use of the Sqoop engine in Linkis1.X.

    1.Sqoop engine Linkis system parameter configuration#

    The Sqoop engine mainly depends on the Hadoop basic environment. If the node needs to deploy the Sqoop engine, the Hadoop client environment needs to be deployed.

    It is strongly recommended that you use the native Sqoop to execute the test task on the node before executing the Sqoop task to check whether the node environment is normal.

    Environment Variable Name | Environment Variable Content | Remark
    JAVA_HOME | JDK installation path | Required
    HADOOP_HOME | Hadoop installation path | Required
    HADOOP_CONF_DIR | Hadoop configuration path | Required
    SQOOP_HOME | Sqoop installation path | Not Required
    SQOOP_CONF_DIR | Sqoop config path | Not Required
    HCAT_HOME | HCAT config path | Not Required
    HBASE_HOME | HBASE config path | Not Required

    Table 1-1 Environment configuration list

    Linkis Parameter Name | Parameter Content | Remark
    wds.linkis.hadoop.site.xml | Sets the location of the hadoop parameter files loaded by sqoop | Required. Reference example: "/etc/hadoop/conf/core-site.xml;/etc/hadoop/conf/hdfs-site.xml;/etc/hadoop/conf/yarn-site.xml;/etc/hadoop/conf/mapred-site.xml"
    sqoop.fetch.status.interval | Sets the interval for fetching the sqoop execution status | Not required, the default value is 5s

    2.Sqoop Engine configuration and deployment#

    2.1 Sqoop Version selection and compilation#

    Linkis 1.1.2 and above support the mainstream Sqoop versions 1.4.6 and 1.4.7; other versions may require modifying some code and recompiling.

    2.2 Sqoop engineConn deploy and load#

    Note: Before compiling the sqoop engine, the linkis project needs to be fully compiled

    Compile sqoop separately:
    cd ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/sqoop/
    mvn clean install

    To install, take the compiled engine package, located at

    ${linkis_code_dir}/linkis-engineconn-plugins/engineconn-plugins/sqoop/target/sqoop-engineconn.zip

    and then deploy to

    ${LINKIS_HOME}/lib/linkis-engineplugins

    and restart linkis-engineplugin

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    More engineplugin details can be found in the following article.
    https://linkis.apache.org/zh-CN/docs/1.1.1/deployment/engine_conn_plugin_installation

    3.Sqoop Engine Usage#

    3.1 OnceEngineConn#

    OnceEngineConn is used by calling LinkisManager's createEngineConn interface through LinkisManagerClient and sending the code to the created Sqoop engine, which then starts executing. This method can be called by other systems, such as Exchangis. Using the Client is also very simple: first create a new maven project, or introduce the following dependency into your project.

    <dependency>
        <groupId>org.apache.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Test Case:

    
     package com.webank.wedatasphere.exchangis.job.server.log.client
     import java.util.concurrent.TimeUnit
    @@ -22,10 +22,10 @@
     object SqoopOnceJobTest extends App {
       LinkisJobBuilder.setDefaultServerUrl("http://127.0.0.1:9001")
       val logPath = "C:\\Users\\resources\\log4j.properties"
       System.setProperty("log4j.configurationFile", logPath)
       val startUpMap = new util.HashMap[String, Any]
       startUpMap.put("wds.linkis.engineconn.java.driver.memory", "1g")
       val builder = SimpleOnceJob.builder().setCreateService("Linkis-Client")
         .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "sqoop-1.4.6")
         .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "Client")
         .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
         .setStartupParams(startUpMap)
         .setMaxSubmitTime(30000)
         .addExecuteUser("freeuser")
       val onceJob = importJob(builder)
       val time = System.currentTimeMillis()
       onceJob.submit()
       println(onceJob.getId)
       val logOperator = onceJob.getOperator(EngineConnLogOperator.OPERATOR_NAME).asInstanceOf[EngineConnLogOperator]
       println(onceJob.getECMServiceInstance)
       logOperator.setFromLine(0)
       logOperator.setECMServiceInstance(onceJob.getECMServiceInstance)
       logOperator.setEngineConnType("sqoop")
       logOperator.setIgnoreKeywords("[main],[SpringContextShutdownHook]")
       var progressOperator = onceJob.getOperator(EngineConnProgressOperator.OPERATOR_NAME).asInstanceOf[EngineConnProgressOperator]
       var metricOperator = onceJob.getOperator(EngineConnMetricsOperator.OPERATOR_NAME).asInstanceOf[EngineConnMetricsOperator]
       var end = false
       var rowBefore = 1
       while (!end || rowBefore > 0) {
         if (onceJob.isCompleted) {
           end = true
           metricOperator = null
         }
         logOperator.setPageSize(100)
         Utils.tryQuietly {
           val logs = logOperator.apply()
           logs.logs.asScala.foreach(log => println(log))
           rowBefore = logs.logs.size
         }
         Thread.sleep(3000)
         Option(metricOperator).foreach(operator => {
           if (!onceJob.isCompleted) {
             println(s"Metric Monitor: ${operator.apply()}")
             println(s"Progress: ${progressOperator.apply()}")
           }
         })
       }
       onceJob.isCompleted
       onceJob.waitForCompleted()
       println(onceJob.getStatus)
       println(TimeUnit.SECONDS.convert(System.currentTimeMillis() - time, TimeUnit.MILLISECONDS) + "s")
       System.exit(0)
     
       def importJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {
         jobBuilder
           .addJobContent("sqoop.env.mapreduce.job.queuename", "queue_10")
           .addJobContent("sqoop.mode", "import")
           .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")
           .addJobContent("sqoop.args.username", "free")
           .addJobContent("sqoop.args.password", "testpwd")
           .addJobContent("sqoop.args.query", "select id as order_number, sno as time from" +
             " exchangis where sno =1 and $CONDITIONS")
           .addJobContent("sqoop.args.hcatalog.database", "freedb")
           .addJobContent("sqoop.args.hcatalog.table", "zy_test")
           .addJobContent("sqoop.args.hcatalog.partition.keys", "month")
           .addJobContent("sqoop.args.hcatalog.partition.values", "3")
           .addJobContent("sqoop.args.num.mappers", "1")
           .build()
       }
    -  def exportJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = {
         jobBuilder
           .addJobContent("sqoop.env.mapreduce.job.queuename", "queue1")
           .addJobContent("sqoop.mode", "import")
           .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis")
           .addJobContent("sqoop.args.query", "select id as order, sno as great_time from" +
             " exchangis_table where sno =1 and $CONDITIONS")
           .addJobContent("sqoop.args.hcatalog.database", "hadoop")
           .addJobContent("sqoop.args.hcatalog.table", "partition_33")
           .addJobContent("sqoop.args.hcatalog.partition.keys", "month")
           .addJobContent("sqoop.args.hcatalog.partition.values", "4")
           .addJobContent("sqoop.args.num.mappers", "1")
           .build()
       }

    Parameter comparison table (against native sqoop parameters):

    sqoop.env.mapreduce.job.queuename<=>-Dmapreduce.job.queuename
    sqoop.args.connection.manager<===>--connection-manager
    sqoop.args.connection.param.file<===>--connection-param-file
    sqoop.args.driver<===>--driver
    sqoop.args.hadoop.home<===>--hadoop-home
    sqoop.args.hadoop.mapred.home<===>--hadoop-mapred-home
    sqoop.args.help<===>help
    sqoop.args.password<===>--password
    sqoop.args.password.alias<===>--password-alias
    sqoop.args.password.file<===>--password-file
    sqoop.args.relaxed.isolation<===>--relaxed-isolation
    sqoop.args.skip.dist.cache<===>--skip-dist-cache
    sqoop.args.username<===>--username
    sqoop.args.verbose<===>--verbose
    sqoop.args.append<===>--append
    sqoop.args.as.avrodatafile<===>--as-avrodatafile
    sqoop.args.as.parquetfile<===>--as-parquetfile
    sqoop.args.as.sequencefile<===>--as-sequencefile
    sqoop.args.as.textfile<===>--as-textfile
    sqoop.args.autoreset.to.one.mapper<===>--autoreset-to-one-mapper
    sqoop.args.boundary.query<===>--boundary-query
    sqoop.args.case.insensitive<===>--case-insensitive
    sqoop.args.columns<===>--columns
    sqoop.args.compression.codec<===>--compression-codec
    sqoop.args.delete.target.dir<===>--delete-target-dir
    sqoop.args.direct<===>--direct
    sqoop.args.direct.split.size<===>--direct-split-size
    sqoop.args.query<===>--query
    sqoop.args.fetch.size<===>--fetch-size
    sqoop.args.inline.lob.limit<===>--inline-lob-limit
    sqoop.args.num.mappers<===>--num-mappers
    sqoop.args.mapreduce.job.name<===>--mapreduce-job-name
    sqoop.args.merge.key<===>--merge-key
    sqoop.args.split.by<===>--split-by
    sqoop.args.table<===>--table
    sqoop.args.target.dir<===>--target-dir
    sqoop.args.validate<===>--validate
    sqoop.args.validation.failurehandler<===>--validation-failurehandler
    sqoop.args.validation.threshold<===>--validation-threshold
    sqoop.args.validator<===>--validator
    sqoop.args.warehouse.dir<===>--warehouse-dir
    sqoop.args.where<===>--where
    sqoop.args.compress<===>--compress
    sqoop.args.check.column<===>--check-column
    sqoop.args.incremental<===>--incremental
    sqoop.args.last.value<===>--last-value
    sqoop.args.enclosed.by<===>--enclosed-by
    sqoop.args.escaped.by<===>--escaped-by
    sqoop.args.fields.terminated.by<===>--fields-terminated-by
    sqoop.args.lines.terminated.by<===>--lines-terminated-by
    sqoop.args.mysql.delimiters<===>--mysql-delimiters
    sqoop.args.optionally.enclosed.by<===>--optionally-enclosed-by
    sqoop.args.input.enclosed.by<===>--input-enclosed-by
    sqoop.args.input.escaped.by<===>--input-escaped-by
    sqoop.args.input.fields.terminated.by<===>--input-fields-terminated-by
    sqoop.args.input.lines.terminated.by<===>--input-lines-terminated-by
    sqoop.args.input.optionally.enclosed.by<===>--input-optionally-enclosed-by
    sqoop.args.create.hive.table<===>--create-hive-table
    sqoop.args.hive.delims.replacement<===>--hive-delims-replacement
    sqoop.args.hive.database<===>--hive-database
    sqoop.args.hive.drop.import.delims<===>--hive-drop-import-delims
    sqoop.args.hive.home<===>--hive-home
    sqoop.args.hive.import<===>--hive-import
    sqoop.args.hive.overwrite<===>--hive-overwrite
    sqoop.args.hive.partition.value<===>--hive-partition-value
    sqoop.args.hive.table<===>--hive-table
    sqoop.args.column.family<===>--column-family
    sqoop.args.hbase.bulkload<===>--hbase-bulkload
    sqoop.args.hbase.create.table<===>--hbase-create-table
    sqoop.args.hbase.row.key<===>--hbase-row-key
    sqoop.args.hbase.table<===>--hbase-table
    sqoop.args.hcatalog.database<===>--hcatalog-database
    sqoop.args.hcatalog.home<===>--hcatalog-home
    sqoop.args.hcatalog.partition.keys<===>--hcatalog-partition-keys
    sqoop.args.hcatalog.partition.values<===>--hcatalog-partition-values
    sqoop.args.hcatalog.table<===>--hcatalog-table
    sqoop.args.hive.partition.key<===>--hive-partition-key
    sqoop.args.map.column.hive<===>--map-column-hive
    sqoop.args.create.hcatalog.table<===>--create-hcatalog-table
    sqoop.args.hcatalog.storage.stanza<===>--hcatalog-storage-stanza
    sqoop.args.accumulo.batch.size<===>--accumulo-batch-size
    sqoop.args.accumulo.column.family<===>--accumulo-column-family
    sqoop.args.accumulo.create.table<===>--accumulo-create-table
    sqoop.args.accumulo.instance<===>--accumulo-instance
    sqoop.args.accumulo.max.latency<===>--accumulo-max-latency
    sqoop.args.accumulo.password<===>--accumulo-password
    sqoop.args.accumulo.row.key<===>--accumulo-row-key
    sqoop.args.accumulo.table<===>--accumulo-table
    sqoop.args.accumulo.user<===>--accumulo-user
    sqoop.args.accumulo.visibility<===>--accumulo-visibility
    sqoop.args.accumulo.zookeepers<===>--accumulo-zookeepers
    sqoop.args.bindir<===>--bindir
    sqoop.args.class.name<===>--class-name
    sqoop.args.input.null.non.string<===>--input-null-non-string
    sqoop.args.input.null.string<===>--input-null-string
    sqoop.args.jar.file<===>--jar-file
    sqoop.args.map.column.java<===>--map-column-java
    sqoop.args.null.non.string<===>--null-non-string
    sqoop.args.null.string<===>--null-string
    sqoop.args.outdir<===>--outdir
    sqoop.args.package.name<===>--package-name
    sqoop.args.conf<===>-conf
    sqoop.args.D<===>-D
    sqoop.args.fs<===>-fs
    sqoop.args.jt<===>-jt
    sqoop.args.files<===>-files
    sqoop.args.libjars<===>-libjars
    sqoop.args.archives<===>-archives
    sqoop.args.update.key<===>--update-key
    sqoop.args.update.mode<===>--update-mode
    sqoop.args.export.dir<===>--export-dir
    - + \ No newline at end of file diff --git a/docs/1.1.3/introduction/index.html b/docs/1.1.3/introduction/index.html index 56079440d6d..5b1427b5570 100644 --- a/docs/1.1.3/introduction/index.html +++ b/docs/1.1.3/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/1.1.3/release-notes-1.1.3/index.html b/docs/1.1.3/release-notes-1.1.3/index.html index e4f928a7362..bd3ec9984c7 100644 --- a/docs/1.1.3/release-notes-1.1.3/index.html +++ b/docs/1.1.3/release-notes-1.1.3/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.3-RC1 | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Release Notes 1.1.3-RC1

    Apache Linkis(incubating) 1.1.3 includes all of Project Linkis-1.1.3.

    This release mainly integrates Prometheus to provide basic monitoring of Linkis microservices; adds a task-retries parameter for task submission; adds records of the relationship between tasks and the executing EC; lets the Flink engine download Yarn logs to the EC log directory; adds front-end page watermark support; upgrades some components with security vulnerabilities; and fixes known bugs reported by the community.

    The main functions are as follows:

    • Integrate prometheus to provide basic capability of monitoring on linkis microservice
    • Task submission supports the parameter of the number of task retries
    • Flink engine supports downloading Yarn logs to EC log directory
    • Some dependency package upgrades and community-known bug fixes

    abbreviation:

    • COMMON: Linkis Common
    • EC: Engineconn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager
    • PS: Linkis Public Service
    • PE: Linkis Public Enhancement
    • RPC: Linkis Common RPC
    • CG: Linkis Computation Governance

    New Feature#

    • [Common][Linkis-1656] Integrate prometheus to provide basic capability for linkis microservice monitoring
    • [EC-Flink][Linkis-2241] Add Yarn Log Operator to support downloading Yarn logs to EC log directory
    • [Web][Linkis-2235] Front-end page supports watermark
    • [Entrance][Linkis-2164] Entrance supports the parameter of task retry times
    • [EC][Linkis-2163] Add task and execution EC records, EC information is recorded in the task's Metrics field

    Enhancement#

    • [ECM][Linkis-2243] Optimize the newly registered ECM service, optimize the service load selection logic, reduce the possible impact of new service availability issues
    • [PS-Jobhistory][Linkis-2198] Optimize task code cache file name, increase time format length, to avoid conflicts in long tasks execution
    • [EC-Python][Linkis-2175] Add a py4j watchdog thread to monitor the java process, preventing the case where the java process quits abnormally but the python process doesn't quit
    • [Common][Linkis-2150] Both the common and entrance modules have custom variable substitution logic, so they are merged into the common module as an optimization
    • [EC-JDBC][Linkis-2142] Fix the problem that the JDBC Engine console configuration cannot take effect immediately after modification (cache time is adjusted to configurable item)
    • [Entrance][Linkis-2160] The consumption queue for task submission supports configuring specific high-volume users
    • [PE][Linkis-2200] Tag code optimization, remove the persistence of tag key-value
    • [EC][Linkis-1749] When EC starts, make it possible to limit the port segment of the specified service through parameters
    • [Common-Storage][Linkis-2168] File type in FileSource supports variable configuration
    • [Common-Storage][Linkis-2161] Added support for formatting parameters automatically when exporting the result set to an excel file
    • [Gateway][Linkis-2249] Optimize the gateway's Parser logic code
    • [Web][Linkis-2248] User resource display page is sorted by user and creator
    • [Web][Linkis-2108] Optimize the front-end page layout, unify the basic style, and optimize the secondary menu display
    • [Install][Linkis-2319] Adjust the datasource service deployment mode, and it is enabled by default; when installing, configure the initial login password
    • [Install][Linkis-2421] When installing and deploying, configure kerberos-related authentication information
    • [EC][Linkis-2159] EC log log supports scrolling by size and time
    • [Common-Scheduler][Linkis-2272] Optimized code format and added LoopArray unit test
    • [PS-ContextService][Linkis-2234] Added a method for batch cleaning context values in contextservice

    Bugs Fix#

    • [EC][Linkis-2275] Fix the problem that the EC engine heartbeat report log field is too long in abnormal scenarios, causing storage failure
    • [Web][Linkis-2239] Fix the yarn queue resource idle/busy usage ratio ring chart not displaying correctly
    • [PS-ContextService][Linkis-2226] Fix FileReader and BufferedReader resources not being released in finally
    • [Install][Linkis-2203] Fix the problem that granting shell scripts +x permission fails when compiling on different systems
    • [Entrance][Linkis-2237] Refactor JobQueueLabel and JobRunningLabel, fix task queue label and task running label bug
    • [Build][Linkis-2354] Fix the ERROR level warning problem when compiling and packaging projects under WIN system
    • [Gateway][Linkis-2329] Fix the configuration problem of LDAP integration
    • [Entrance][Linkis-2238] Optimize the result set path to be separated by date to solve the problem of too many subdirectories in a single folder. The result set path is in the same folder, such as "/tmp/linkis/hadoop/linkis/20220516_210525/IDE/40099", which may cause too many files in one folder
    • [Entrance][Linkis-2162] Optimize the result set path to be separated by date to solve the problem of too many subdirectories in a single folder
    • [Common][Linkis-2332] Close the SpringCloud default configuration center to reduce the interference of unnecessary log information
    • [Web][Linkis-2295] remove redundant code in web install script

    Security related#

    • [PS-Jobhistory][Linkis-2248] Added parameter verification to the task query list interface to prevent sql injection security issues
    • [PS-PublicService][Linkis-1949] /api/rest_j/v1/datasource/columns interface adds user permission check

    Dependency changes#

    • [Common][Linkis-2188] Bump poi 5.2.1 to poi 5.2.2, fix possible memory allocation problems
    • [Common][Linkis-2182] Bump gson:2.8.5 to gson:2.8.9

    Thanks#

    The release of Apache Linkis(incubating) 1.1.3 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors, including but not limited to the following Contributors (in no particular order): Alexkun, CCweixiao, Davidhua1996, QuintinTao, caseone, det101 , doroxinrui, huiyuanjjjjuice, husofskyzy, hzdhgf, jackxu2011, legendtkl, liuyou2, peacewong, peacewong, pjfanning, ruY9527, saLeox, seayi, wForget, wallezhang, yyuser5201314

    - + \ No newline at end of file diff --git a/docs/1.1.3/release/index.html b/docs/1.1.3/release/index.html index 88a4f750902..531c577ab48 100644 --- a/docs/1.1.3/release/index.html +++ b/docs/1.1.3/release/index.html @@ -7,7 +7,7 @@ Version overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Version overview

    Configuration Item#

    | Module name (service name) | Type | Parameter name | Default value | Description |
    | --- | --- | --- | --- | --- |
    | eureka (application-eureka.yml) | New | management.endpoints.web.exposure.include | refresh,info,health,metrics | The endpoint exposure range for Spring Boot Actuator |
    | eureka (application-eureka.yml) | New | eureka.instance.metadata-map:.prometheus.path | ${prometheus.path:/actuator/prometheus} | Prometheus monitoring endpoint for microservices registered in the Eureka metadata |
    | common (application-linkis.yml) | New | eureka.instance.metadata-map:.prometheus.path | ${prometheus.path:${prometheus.endpoint}} | ditto |
    | common | New | wds.linkis.prometheus.enable | false | |
    | common | Modify | wds.linkis.server.user.restful.uri.pass.auth | /api/rest_j/v1/actuator/prometheus | |
    | common | Modify | spring.spring.cloud.config.enabled | false | |
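
    For reference, a minimal sketch of turning on the new Prometheus reporting in the global configuration; the parameter names come from the table above, and $LINKIS_HOME is assumed to point at your install directory:

        # Sketch: enable Prometheus reporting (parameter names from the table above)
        echo "wds.linkis.prometheus.enable=true" >> $LINKIS_HOME/conf/linkis.properties
        echo "wds.linkis.server.user.restful.uri.pass.auth=/api/rest_j/v1/actuator/prometheus" >> $LINKIS_HOME/conf/linkis.properties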

    DB Table Changes#

    For details, see the upgrade schema db/upgrade/1.1.3_schema file in the corresponding branch of the code repository (https://github.com/apache/incubator-linkis).
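
    A hedged sketch of applying the upgrade schema; the exact file names under db/upgrade/1.1.3_schema are assumptions, so verify them in the repository before running:

        # Sketch: apply the 1.1.3 upgrade DDL to the Linkis database
        # (file names are illustrative; check db/upgrade/1.1.3_schema first)
        cd incubator-linkis/db/upgrade/1.1.3_schema
        mysql -h $MYSQL_HOST -P $MYSQL_PORT -u $MYSQL_USER -p $MYSQL_DB < linkis_ddl.sql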

    - + \ No newline at end of file diff --git a/docs/1.1.3/table/udf-table/index.html b/docs/1.1.3/table/udf-table/index.html index d5b1eb279f5..de52a32793a 100644 --- a/docs/1.1.3/table/udf-table/index.html +++ b/docs/1.1.3/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF table structure | Apache Linkis - + @@ -16,7 +16,7 @@ udf_type 3: custom function - python functionudf_type 4: custom function - scala function

    2 linkis_ps_udf_manager#

    The administrator user table for UDF functions, which holds sharing permissions; the sharing entry is only shown on the front end for UDF administrators

    | Number | Name | Description | Type | Key | Empty | Extra | Default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | user_name | | varchar(20) | | YES | | |

    3 linkis_ps_udf_shared_info#

    udf shared record table

    | Number | Name | Description | Type | Key | Empty | Extra | Default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | | |
    | 3 | user_name | username used by the share | varchar(50) | | NO | | |

    4 linkis_ps_udf_tree#

    Tree-level record table for udf classification

    | Number | Name | Description | Type | Key | Empty | Extra | Default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | parent | parent category | bigint(20) | | NO | | |
    | 3 | name | Class name of the function | varchar(100) | | YES | | |
    | 4 | user_name | username | varchar(50) | | NO | | |
    | 5 | description | description information | varchar(255) | | YES | | |
    | 6 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP |
    | 7 | update_time | | timestamp | | NO | | CURRENT_TIMESTAMP |
    | 8 | category | category distinction: udf / function | varchar(50) | | YES | | |

    5 linkis_ps_udf_user_load#

    Configuration of whether a UDF is loaded by default for a user

    | Number | Name | Description | Type | Key | Empty | Extra | Default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | udf_id | id of linkis_ps_udf_baseinfo | int(11) | | NO | | |
    | 3 | user_name | owning user | varchar(50) | | NO | | |

    6 linkis_ps_udf_version#

    udf version information table

    | Number | Name | Description | Type | Key | Empty | Extra | Default value |
    | --- | --- | --- | --- | --- | --- | --- | --- |
    | 1 | id | | bigint(20) | PRI | NO | auto_increment | |
    | 2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | | |
    | 3 | path | The local path of the uploaded script/jar package | varchar(255) | | NO | | |
    | 4 | bml_resource_id | Material resource id in bml | varchar(50) | | NO | | |
    | 5 | bml_resource_version | bml material version | varchar(20) | | NO | | |
    | 6 | is_published | whether to publish | bit(1) | | YES | | |
    | 7 | register_format | registration format | varchar(255) | | YES | | |
    | 8 | use_format | use format | varchar(255) | | YES | | |
    | 9 | description | Version description | varchar(255) | | NO | | |
    | 10 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP |
    | 11 | md5 | | varchar(100) | | YES | | |
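
    When debugging UDF publishing it can help to query this table directly. An illustrative query, assuming the schema described above (udf_id 1 and the connection variables are only examples):

        # Illustrative only: list the version history of the UDF with id 1
        mysql -u "$DB_USER" -p "$DB_NAME" -e "
          SELECT id, bml_resource_version, is_published, create_time
          FROM linkis_ps_udf_version
          WHERE udf_id = 1
          ORDER BY id DESC;"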

    ER diagram#

    (ER diagram image of the UDF tables)

    - + \ No newline at end of file diff --git a/docs/1.1.3/tags/feature/index.html b/docs/1.1.3/tags/feature/index.html index f9713c41af2..f3a8741229c 100644 --- a/docs/1.1.3/tags/feature/index.html +++ b/docs/1.1.3/tags/feature/index.html @@ -7,7 +7,7 @@ 3 docs tagged with "Feature" | Apache Linkis - + @@ -15,7 +15,7 @@

    3 docs tagged with "Feature"

    View All Tags
    - + \ No newline at end of file diff --git a/docs/1.1.3/tags/index.html b/docs/1.1.3/tags/index.html index 519836bb923..fa2b7804b0a 100644 --- a/docs/1.1.3/tags/index.html +++ b/docs/1.1.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html b/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html index b5115227d5c..75fb4e5e12f 100644 --- a/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html +++ b/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified relative to Linkis0.x. A public configuration file linkis.properties is provided in the conf directory so that common configuration parameters no longer need to be configured in multiple microservices at the same time. This document lists the parameters of Linkis1.0 by module.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

            The general configuration can be set in the global linkis.properties file: set once, it takes effect for every microservice.

    1.1 Global configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.encoding | utf-8 | Linkis default encoding format |
    | wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format |
    | wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConns open remote debugging ports |
    | wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login |
    | wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically |
    | wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout |
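
    As an example of how these globals are used, the following sketch enables the password-free debug mode described in the table; only do this in test environments:

        # Sketch: enable debug mode in the global linkis.properties (test environments only)
        echo "wds.linkis.test.mode=true" >> $LINKIS_HOME/conf/linkis.properties
        echo "wds.linkis.test.user=hadoop" >> $LINKIS_HOME/conf/linkis.properties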

    1.2 LDAP configurations#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ldap.proxy.url | None | LDAP URL address |
    | wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address |
    | wds.linkis.ldap.proxy.userNameFormat | None | |

    1.3 Hadoop configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.hadoop.root.user | hadoop | HDFS super user |
    | wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path |
    | wds.linkis.keytab.enable | false | Whether to enable kerberos |
    | wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true |
    | wds.linkis.keytab.host.enabled | false | |
    | wds.linkis.keytab.host | 127.0.0.1 | |
    | hadoop.config.dir | None | If not configured, it is read from the environment variable HADOOP_CONF_DIR |
    | wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration |
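
    A minimal sketch of enabling Kerberos with the parameters above; the keytab path is the table's default and should be adjusted to your environment:

        # Sketch: enable Kerberos authentication (parameter names from the table above)
        echo "wds.linkis.keytab.enable=true" >> $LINKIS_HOME/conf/linkis.properties
        echo "wds.linkis.keytab.file=/appcom/keytab" >> $LINKIS_HOME/conf/linkis.properties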

    1.4 Linkis RPC configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended to keep the default) |
    | wds.linkis.ms.rpc.sync.timeout | 60000 | Linkis RPC Receiver's default processing timeout |
    | wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended to keep the default) |
    | wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum waiting time for a refresh (recommended to keep the default) |
    | wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this appropriately) |
    | wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time |
    | wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the Receiver consumption queue (if there are many online users, it is recommended to increase this appropriately) |
    | wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads |
    | wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time |
    | wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Maximum number of buffers in the Sender consumption queue |

    2. Computation governance configuration parameters#

    2.1 Entrance configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version |
    | wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version |
    | wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version |
    | wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version |
    | wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version |
    | wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version |
    | wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance |
    | wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job |
    | wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default |
    | wds.linkis.default.requestApplication.name | IDE | The default submission system when the submission system is not specified |
    | wds.linkis.default.runType | sql | The default script type when the script type is not specified |
    | wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not pushed to the client by default |
    | wds.linkis.log.exclude | org.apache, hive.ql, hive.metastore, com.netflix, com.webank.wedatasphere, com.webank | Real-time INFO-level logs that are not pushed to the client by default |
    | wds.linkis.instance | 3 | User's default number of concurrent jobs per engine |
    | wds.linkis.max.ask.executor.time | 5m | Maximum time to apply to LinkisManager for an available EngineConn |
    | wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | When pushing Hive logs to the client, which logs are not filtered by default |
    | wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | When pushing Spark logs to the client, which logs are not filtered by default |
    | wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax |
    | wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Default dangerous shell syntax |
    | wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax |
    | wds.linkis.sql.default.limit | 5000 | Default maximum number of rows returned in a SQL result set |

    2.2 EngineConn configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Job result set default storage path |
    | wds.linkis.engine.resultSet.cache.max | 0k | Result sets smaller than this size are returned to Entrance directly by EngineConn without being written to disk |
    | wds.linkis.engine.default.limit | 5000 | |
    | wds.linkis.engine.lock.expire.time | 120000 | The maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock without submitting code to EngineConn before it is released |
    | wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side |
    | wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager |
    | wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time |

    2.3 EngineConnManager configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.ecm.memory.max | 80g | Maximum memory ECM can use to start EngineConns |
    | wds.linkis.ecm.cores.max | 50 | Maximum number of CPU cores ECM can use to start EngineConns |
    | wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; generally recommended to set the same as wds.linkis.ecm.cores.max |
    | wds.linkis.ecm.protected.memory | 4g | ECM protected memory, i.e. the memory ECM uses to start EngineConns cannot exceed wds.linkis.ecm.memory.max minus wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPU cores of ECM; the meaning is the same as wds.linkis.ecm.protected.memory |
    | wds.linkis.ecm.protected.engine.instances | 2 | Number of protected instances of ECM |
    | wds.linkis.engineconn.wait.callback.pid | 3s | Waiting time for EngineConn to return its pid |

    2.4 LinkisManager configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.manager.am.engine.start.max.time | 10m | The maximum time for LinkisManager to start a new EngineConn |
    | wds.linkis.manager.am.engine.reuse.max.time | 5m | The maximum selection time for LinkisManager to reuse an existing EngineConn |
    | wds.linkis.manager.am.engine.reuse.count.limit | 10 | The maximum number of polling attempts for LinkisManager to reuse an existing EngineConn |
    | wds.linkis.multi.user.engine.types | jdbc,es,presto | When LinkisManager reuses an existing EngineConn, the engine types for which the user is not used as a reuse rule |
    | wds.linkis.rm.instance | 10 | The default maximum number of instances per user per engine |
    | wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's usage queue |
    | wds.linkis.rm.yarnqueue.memory.max | 450g | The maximum amount of memory per user in each engine's usage queue |
    | wds.linkis.rm.yarnqueue.instance.max | 30 | The maximum number of applications launched by each user in each engine's queue |

    3. Configuration parameters of each engine#

    3.1 JDBC engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.jdbc.default.limit | 5000 | The default maximum number of result set rows returned |
    | wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine |
    | wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions |

    3.2 Python engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path |
    | python.path | None | Specify an additional path for Python; only accepts shared storage paths |

    3.3 Spark engine configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for Scala and Python command interpreters |
    | PYSPARK_DRIVER_PYTHON | python | Python command path |
    | wds.linkis.server.spark-submit | spark-submit | spark-submit command path |

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.bml.dws.version | v1 | Version number requested by Linkis Restful |
    | wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests |
    | wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests |
    | wds.linkis.bml.hdfs.prefix | /tmp/linkis | The prefix file path of BML files stored on HDFS |

    4.2 Metadata configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default |
    | hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default |
    | hive.meta.url | None | The URL of the HiveMetaStore database. If hive.config.dir is not configured, this value must be configured |
    | hive.meta.user | None | User of the HiveMetaStore database |
    | hive.meta.password | None | Password of the HiveMetaStore database |

    4.3 JobHistory configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.jobhistory.admin | None | The default Admin account, used to specify which users can view everyone's execution history |

    4.4 FileSystem configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's Linux local root directory |
    | wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory |
    | wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | The first-level suffix after the user's HDFS root directory. The user's actual root directory is: ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix} |
    | wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set |
    | wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Row limit when the result set is downloaded as a CSV file |
    | wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Row limit when the result set is downloaded as an Excel file |
    | wds.linkis.workspace.filesystem.get.timeout | 2000L | The maximum timeout for requests to the underlying file system. (If the performance of your HDFS or Linux machine is low, it is recommended to increase this value appropriately) |

    4.5 UDF configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.udf.share.path | /mnt/bdap/udf | The storage path of shared UDFs; it is recommended to set it to an HDFS path |

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    | Parameter name | Default value | Description |
    | --- | --- | --- |
    | wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy user mode; if enabled, the login user's requests are proxied to the proxy user for execution |
    | wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules |
    | wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Proxy file refresh interval |
    | wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable Token login mode; if enabled, access to Linkis is allowed in the form of tokens |
    | wds.linkis.gateway.conf.token.auth.config | token.properties | Token rule storage file |
    | wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Token file refresh interval |
    | wds.linkis.gateway.conf.url.pass.auth | /dws/ | Requests released by default without login verification |
    | wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode |
    | wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page |
    | wds.linkis.admin.user | hadoop | Administrator user list |
    | wds.linkis.login_encrypt.enable | false | Whether to enable RSA-encrypted transmission of the password when a user logs in |
    | wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism |
    | wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file |

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

    | From Version | Parameter name | Default value | Description |
    | --- | --- | --- | --- |
    | v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Specify the relative path of the service to be loaded |
    | v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Set the service loading timeout; if it exceeds the specified time, the service is not loaded |
    | v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Set the service used to get the data source |
    | v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/HOST@EXAMPLE.COM | Set the kerberos principal for the linkis-metadata hive service |
    | v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | Set the user for the linkis-metadata hive service |
    | v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Set the kerberos krb5 path for the linkis-metadata hive service |
    | v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Set the tmp location for the linkis-metadata hive and kafka services |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Set the driver for the hive-metadata mysql service |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | Set the URL format for the hive-metadata mysql service |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | Set the MySQL connect timeout for the hive-metadata mysql service |
    | v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Set the socket open timeout for the hive-metadata mysql service |
    - + \ No newline at end of file diff --git a/docs/1.1.3/tuning_and_troubleshooting/overview/index.html index ccc1287b8b5..f238adf3aee 100644 --- a/docs/1.1.3/tuning_and_troubleshooting/overview/index.html +++ b/docs/1.1.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ OS version compatibility matters most, as some system versions may have incompatible commands. For example, yum's poor compatibility on Ubuntu may cause yum-related errors during installation and deployment. It is also recommended to avoid deploying Linkis on Windows as much as possible, since currently no script is fully compatible with the .bat command.

  • Missing configuration item: There are two configuration files that need to be modified in linkis1.0 version, linkis-env.sh and db.sh

    The former contains the environment parameters that linkis needs to load during execution, and the latter contains the database information for the tables that linkis itself needs to store. Under normal circumstances, if a corresponding configuration is missing, the error message will show an exception related to the key value. For example, when db.sh is missing the database configuration, an error such as "unknown mysql server host '-P'" will appear, which is caused by the missing host.

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port occupation: since the default ports of Linkis microservices are mostly concentrated around 9000, before starting you need to check whether the port of each microservice is occupied by another process. If it is occupied, you need to modify the corresponding microservice port in the conf/linkis-env.sh file (a quick port check is sketched after this list)

    2. Necessary configuration parameters are missing: some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the configuration related to wds.linkis.engineconn.* from conf/linkis.properties at startup; if the user changed the Linkis path after installation without updating this configuration, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: when deploying and installing, it is recommended that users follow the recommended system and application versions in the official documents as much as possible, and install necessary system plug-ins such as expect and yum. If an application version is not compatible, it may cause application-related errors. For example, the incompatibility of SQL statements in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation. You need to refer to the "Q&A Problem Summary" or the deployment documentation to make the corresponding settings.
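
    A minimal port-occupation check for case 1, assuming the default Gateway port 9001; adjust to the ports configured in your conf/linkis-env.sh:

        # Check whether a Linkis microservice port (here 9001) is already occupied
        ss -lntp | grep 9001 || echo "port 9001 is free"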

  • Report error during microservice execution period

    The situation of error reporting during the execution of microservices is more complicated, and the situations encountered are also different depending on the environment, but the troubleshooting methods are basically the same. Starting from the corresponding microservice error catalog, we can roughly divide it into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure ("insufficient resources, request engine failure"): when this type of error occurs, it is not necessarily due to insufficient resources, because the front end only fetches logs after the Spring project has started, so errors occurring before the engine starts cannot be captured well. Three kinds of high-frequency problems were found during actual use by internal test users:

      a. The engine cannot be created because there is no engine directory permission: The log will be printed to the linkis.out file under the cg-engineconnmanager microservice. You need to enter the file to view the specific reason.

      b. There is a dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: since the engine directory has already been created, the log is printed to the stdout file under the engine directory; the engine path can be found as described in item c

      c. Errors reported during engine execution: each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, you need to find the corresponding engine log in the directory of the starting user. The corresponding root path is the ENGINECONN_ROOT_PATH configured in linkis-env.sh before installation. If you need to modify the path after installation, you need to modify wds.linkis.engineconn.root.dir in linkis.properties.
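
    For case c, a quick way to find the newest engine stdout log; ENGINECONN_ROOT_PATH is the directory configured in linkis-env.sh, and the user name hadoop is only an example:

        # Sketch: locate the most recently written engine stdout log for user hadoop
        find $ENGINECONN_ROOT_PATH/hadoop -name stdout 2>/dev/null | xargs ls -t 2>/dev/null | head -1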

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located according to the above process during installation and deployment, you can send the error information to our community groups. To make it easier for community members and developers to help and to improve efficiency, when asking questions it is recommended to describe the problem, attach the related log information, and mention what you have already checked. If you think it may be an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate a problem, but compared with reviewing documentation it requires some understanding of the source code structure. It is recommended to review the detailed source-code structure of Linkis in the Linkis WIKI before remote debugging. Once you are reasonably familiar with the project's source code structure, you can refer to How to Debug Linkis.

    - + \ No newline at end of file diff --git a/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html b/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html index eed50feb8aa..61cd8afb0fe 100644 --- a/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html +++ b/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When submitting a task to run on Yarn, Yarn provides a configurable interface; as a highly scalable framework, Linkis can likewise be configured to set these resources.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration lives in linkis-engineconn-plugins/engineconn-plugins; you can adjust it to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details (for example on Hive and Yarn configuration) please refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 424143adcb8..3bdea961ac8 100644 --- a/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 Need newly executed sql:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the publicservice-instanceLabel service depends on:
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on:
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all the data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration table. It is necessary to re-execute the table creation statement and the initialization statement.

         This means that Linkis0.X users' existing engine configuration parameters can no longer be migrated to Linkis1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0
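
    A quick sanity check is to confirm that all microservices registered with Eureka; this sketch assumes the default Eureka port 20303, which depends on your deployment:

        # Sketch: list the Linkis services registered with Eureka after startup
        curl -s http://127.0.0.1:20303/ | grep -io 'linkis-[a-z-]*' | sort -u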

    - + \ No newline at end of file diff --git a/docs/1.1.3/upgrade/upgrade_guide/index.html b/docs/1.1.3/upgrade/upgrade_guide/index.html index cbc10756d5e..63726761f6b 100644 --- a/docs/1.1.3/upgrade/upgrade_guide/index.html +++ b/docs/1.1.3/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ Version upgrades above 1.0.3 | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

    #Example
    server {
        ......
        location dss/linkis {
            alias /appcom/Install/linkis-web-newversion/dist;   # static file directory
            index index.html index.html;
        }
        ......
    }

    Reload nginx configuration

    sudo nginx -s reload
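
    It is safer to validate the edited configuration first; nginx -t checks the syntax without touching the running server, so a broken configuration is never loaded:

        # Validate the configuration, then reload only if the check passes
        sudo nginx -t && sudo nginx -s reload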

    5.3 Notes#

    • After the management console is upgraded, because the browser may have a cache, if you want to verify the effect, it is best to clear the browser cache
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/console_manual/index.html b/docs/1.1.3/user_guide/console_manual/index.html index fc400b2b424..345f6dfdd03 100644 --- a/docs/1.1.3/user_guide/console_manual/index.html +++ b/docs/1.1.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: Next(1.1.3)

    Console User Manual

    Linkis1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying user development and management work.

    1. Structure of Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrators of the Linkis Computation Governance Console can be configured with the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ‘,’)

    2. Global history#

    The global history interface provides the user's own linkis task submission record. The execution status of each task can be displayed here, and the reason for the failure of task execution can also be queried by clicking the view button on the left side of the task


    For linkis computing management console administrators, the administrator can view the historical tasks of all users by clicking the switch administrator view on the page.


    3. Resource management#

    In the resource management interface, the user can see the status of the engine currently started and the status of resource occupation, and can also stop the engine through the page.


    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.


    The user can expand all configuration information in the directory by clicking the application type at the top, then select the engine type within the application, modify the configuration information, and click "Save" to take effect.

    The edit-catalog and new-application-type functions are only visible to the administrator. Click the edit button to delete an existing application or engine configuration (note: deleting an application directly will delete all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.


    5. Global variable#

    In the global variable interface, users can customize variables for code writing, just click the edit button to add parameters.


    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view the status information of an ECM, modify its label information, modify its status, and query all engine information under each ECM. It is only visible to the administrator; the administrator configuration method can be found in the second chapter of this article.


    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.


    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface can view all microservice information under Linkis, and this interface is only visible to the administrator. Linkis's own microservices can be viewed by clicking on the Eureka registration center. The microservices associated with linkis will be listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/how_to_use/index.html b/docs/1.1.3/user_guide/how_to_use/index.html index dbc6db82c87..4d9f4e46b93 100644 --- a/docs/1.1.3/user_guide/how_to_use/index.html +++ b/docs/1.1.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/linkis-datasource-client/index.html b/docs/1.1.3/user_guide/linkis-datasource-client/index.html index 783d9a5de76..f50358a0c1d 100644 --- a/docs/1.1.3/user_guide/linkis-datasource-client/index.html +++ b/docs/1.1.3/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK | Apache Linkis - + @@ -31,7 +31,7 @@ def testMetadataGetDatabases(client:LinkisMetaDataRemoteClient): Unit ={ client.getDatabases(MetadataGetDatabasesAction.builder().setUser("hadoop").setDataSourceId(9l).setUser("hadoop").setSystem("client").build()).getDbs }}
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/linkiscli_manual/index.html b/docs/1.1.3/user_guide/linkiscli_manual/index.html index 4cd2cbe5dd9..62ab95d60ca 100644 --- a/docs/1.1.3/user_guide/linkiscli_manual/index.html +++ b/docs/1.1.3/user_guide/linkiscli_manual/index.html @@ -7,16 +7,16 @@ Linkis-Cli Manual | Apache Linkis - +
      Version: Next(1.1.3)

      Linkis-Cli usage documentation

      Introduction#

      Linkis-Cli is a shell command line program used to submit tasks to Linkis.

      Basic case#

      You can simply submit a task to Linkis by referring to the example below

      The first step is to check whether the default configuration file linkis-cli.properties exists in the conf/ directory, and it contains the following configuration:

         wds.linkis.client.common.gatewayUrl=http://127.0.0.1:9001
         wds.linkis.client.common.authStrategy=token
         wds.linkis.client.common.tokenKey=Validation-Code
         wds.linkis.client.common.tokenValue=BML-AUTH

      The second step is to enter the linkis installation directory and enter the command:

         sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from testdb.test;"  -submitUser hadoop -proxyUser hadoop 

      In the third step, you will see the information on the console that the task has been submitted to linkis and started to execute.

      Linkis-cli currently only supports synchronous submission, that is, after submitting a task to linkis, it will continue to inquire about the task status and pull task logs until the task ends. If the status is successful at the end of the task, linkis-cli will also actively pull the result set and output it.

      How to use#

         sh ./bin/linkis-cli [parameter] [cli parameter]

      Supported parameter list#

      • cli parameters

        | Parameter | Description | Data Type | Is Required |
        | --- | --- | --- | --- |
        | --gwUrl | Manually specify the linkis gateway address | String | No |
        | --authStg | Specify authentication policy | String | No |
        | --authKey | Specify authentication key | String | No |
        | --authVal | Specify authentication value | String | No |
        | --userConf | Specify the configuration file location | String | No |
      • Parameters

        | Parameter | Description | Data Type | Is Required |
        | --- | --- | --- | --- |
        | -engType | Engine Type | String | Yes |
        | -runType | Execution Type | String | Yes |
        | -code | Execution code | String | No |
        | -codePath | Local execution code file path | String | No |
        | -smtUsr | Specify the submitting user | String | No |
        | -pxyUsr | Specify the execution user | String | No |
        | -creator | Specify creator | String | No |
        | -scriptPath | scriptPath | String | No |
        | -outPath | Path of output result set to file | String | No |
        | -confMap | configuration map | Map | No |
        | -varMap | variable map for variable substitution | Map | No |
        | -labelMap | linkis labelMap | Map | No |
        | -sourceMap | Specify linkis sourceMap | Map | No |

      Detailed example#

      One, add cli parameters#

      Cli parameters can be specified manually on the command line; values passed this way override the conflicting configuration items in the default configuration file

          sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from testdb.test;" -submitUser hadoop -proxyUser hadoop --gwUrl http://127.0.0.1:9001 --authStg token --authKey [tokenKey] --authVal [tokenValue]

      Two, add engine initial parameters#

      The initial parameters of the engine can be added through the -confMap parameter. Note that the data type of the parameter is Map. The input format of the command line is as follows:

          -confMap key1=val1,key2=val2,...    

      For example, the following command sets startup parameters such as the Yarn queue for engine startup and the number of Spark executors:

        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=q02,spark.executor.instances=3 -code "select count(*) from testdb.test;"  -submitUser hadoop -proxyUser hadoop  

      Of course, these parameters can also be read in a configuration file, we will talk about it later

      Three, add tags#

      Labels can be added through the -labelMap parameter. Like the -confMap, the type of the -labelMap parameter is also Map:

        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -labelMap labelKey=labelVal -code "select count(*) from testdb.test;"  -submitUser hadoop -proxyUser hadoop  

      Four, variable replacement#

      Linkis-cli variable substitution is implemented with the ${} placeholder syntax together with the -varMap parameter

         sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from \${key};" -varMap key=testdb.test  -submitUser hadoop -proxyUser hadoop  

      During execution, the sql statement will be replaced with:

         select count(*) from testdb.test

      Note that the backslash in '\$' is an escape to prevent the shell from expanding the parameter in advance. If -codePath is used to specify a local script instead, the escape character is not required

      Five, use user configuration#

      1. linkis-cli supports loading user-defined configuration files; the configuration file path is specified by the --userConf parameter, and the configuration file needs to be in the .properties file format
        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from testdb.test;"  -submitUser hadoop -proxyUser hadoop  --userConf [configuration file path]
      2. Which parameters can be configured?

      All parameters can be configured, for example:

      cli parameters:

         wds.linkis.client.common.gatewayUrl=http://127.0.0.1:9001
         wds.linkis.client.common.authStrategy=static
         wds.linkis.client.common.tokenKey=[tokenKey]
         wds.linkis.client.common.tokenValue=[tokenValue]

      parameter:

         wds.linkis.client.label.engineType=spark-2.4.3
         wds.linkis.client.label.codeType=sql

      When the Map class parameters are configured, the format of the key is

          [Map prefix] + [key]

      The Map prefix includes:

      • ExecutionMap prefix: wds.linkis.client.exec
      • sourceMap prefix: wds.linkis.client.source
      • ConfigurationMap prefix: wds.linkis.client.param.conf
      • runtimeMap prefix: wds.linkis.client.param.runtime
      • labelMap prefix: wds.linkis.client.label

      Note:

      1. variableMap does not support configuration

      2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

        Instruction parameters > keys in instruction Map-type parameters > user configuration > default configuration

      Example:

      Configure engine startup parameters:

         wds.linkis.client.param.conf.spark.executor.instances=3
         wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

      Configure labelMap parameters:

         wds.linkis.client.label.myLabel=label123

      Six, output result set to file#

      Use the -outPath parameter to specify an output directory; linkis-cli will output the result sets to files, and a file is created automatically for each result set. The output format is as follows:

          task-[taskId]-result-[idx].txt    

      E.g:

        task-906-result-1.txt
        task-906-result-2.txt
        task-906-result-3.txt
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/overview/index.html b/docs/1.1.3/user_guide/overview/index.html index 93e8e2861b8..2dbfd307ead 100644 --- a/docs/1.1.3/user_guide/overview/index.html +++ b/docs/1.1.3/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/sdk_manual/index.html b/docs/1.1.3/user_guide/sdk_manual/index.html index 6db504d2e7d..e31b34c72bb 100644 --- a/docs/1.1.3/user_guide/sdk_manual/index.html +++ b/docs/1.1.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/1.1.3/user_guide/udf/index.html b/docs/1.1.3/user_guide/udf/index.html index ee7378945c9..e4e99be35ad 100644 --- a/docs/1.1.3/user_guide/udf/index.html +++ b/docs/1.1.3/user_guide/udf/index.html @@ -7,7 +7,7 @@ Use of UDFs | Apache Linkis - + @@ -20,7 +20,7 @@ Prerequisite: The sharing function needs to be used by the user as an administrator, otherwise the front-end page will not provide an operation entry.

    Click the share button of udf: the content box will pop up, enter the list of users you want to share (comma separated).

    Note: After sharing to others, others need to actively load the UDF before using it.

    After sharing, the shared user can find it in "Shared Function", check the load and use it.

    5 Introduction of other functions#

    5.1 UDF handover#

    For example, when the user leaves the company, it may be necessary to hand over personal udf to others. Click the Handover button, select your handover object, and click OK.

    5.2 UDF Expiration#

    For a UDF shared to others, if it has been loaded by the sharing user, the udf cannot be deleted directly, but the udf can only be marked as expired. For the time being, it is only used for marking and does not affect use.

    5.3 UDF version list#

    Click the "version list" button of a udf to view all versions of the udf. The following features are provided for each version:

    Create a new version: Copy the corresponding version to the latest version.

    Download: Download the udf file from bml to the local.

    View the source code: For the python/scala script type, you can directly view the source code, but the jar type is not supported.

    Publish: The shared udf can click to publish a certain version, so that the version will take effect for the shared user. Note: Shared users use the latest version of udf released, and individual users always use the latest version.

    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html index 9ba431ecd2c..5e5e2d2b33b 100644 --- a/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html +++ b/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html @@ -7,7 +7,7 @@ Engine Plugin Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Engine Plugin Api

    EnginePluginRestful class

    refresh#

    Interface address: /api/rest_j/v1/engineplugin/refresh

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh a single resource

    Request Parameters:

    | Parameter name | Parameter description | Request type | Required | Data type | schema |
    | --- | --- | --- | --- | --- | --- |
    | ecType | type | query | false | string | |
    | version | version | query | false | string | |

    Response Status:

    | Status code | Description | schema |
    | --- | --- | --- |
    | 200 | OK | Message |
    | 401 | Unauthorized | |
    | 403 | Forbidden | |
    | 404 | Not Found | |

    Response parameters:

    | Parameter name | Parameter description | Type | schema |
    | --- | --- | --- | --- |
    | data | Dataset | object | |
    | message | Description | string | |
    | method | request url | string | |
    | status | Status | integer(int32) | integer(int32) |

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    refresh all#

    Interface address: /api/rest_j/v1/engineplugin/refreshAll

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Refresh all ec resources

    Request Parameters:

    No

    Response Status:

    | Status code | Description | schema |
    | --- | --- | --- |
    | 200 | OK | Message |
    | 401 | Unauthorized | |
    | 403 | Forbidden | |
    | 404 | Not Found | |

    Response parameters:

    | Parameter name | Parameter description | Type | schema |
    | --- | --- | --- | --- |
    | data | Dataset | object | |
    | message | Description | string | |
    | method | request url | string | |
    | status | Status | integer(int32) | integer(int32) |

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html index a4415928bb0..23a8da86c96 100644 --- a/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html +++ b/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ Engine Material Refresh Interface | Apache Linkis - + @@ -16,7 +16,7 @@ none

    Response parameters:

|parameter name|parameter description|type|schema|
|data| |object| |
|message| |string| |
|method| |string| |
|status| |integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "msg": "Refresh successfully"    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html b/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html index 18a89abb13e..ad13dd20b77 100644 --- a/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html @@ -7,7 +7,7 @@ Task Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Task Management

    EntranceMetricRestfulApi class

    Task management

    start task#

    Interface address:/api/rest_j/v1/entrance/api/metrics/runningtask

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Start task

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task info#

    Interface address:/api/rest_j/v1/entrance/api/metrics/taskinfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|creator|Creator|query|false|string| |
|engineTypeLabel|Engine Type Label|query|false|string| |
|user|user|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html index 9b4b9179c01..71c26af8482 100644 --- a/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html +++ b/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html @@ -7,7 +7,7 @@ Task Action | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Task Action

    EntranceRestfulApi class

    process task request#

    Interface address:/api/rest_j/v1/entrance/execute

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    The execute function handles the request submitted by the user to execute the task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|json|json|body|true|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Submit the execute function#

    Interface address:/api/rest_j/v1/entrance/submit

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Submit execute function

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|Submit|json|body|true|Submit|Submit|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    end task#

    Interface address: /api/rest_j/v1/entrance/{id}/kill

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    kill task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|ID|path|false|string| |
|taskID|taskID|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    End Jobs#

    Interface address: /api/rest_j/v1/entrance/{id}/killJobs

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    End Jobs

    Request example:

    {    "taskIDList": [],    "idList": []}

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|id|id, generated from the request path|true|string|string| |
|taskIDList|collection of task IDs|false|String|String| |
|idList|ID collection|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/entrance/#id/killJobs",    "status": 0,    "message": "success",    "data": {        "messages": [{            "method": "",            "status": 0,            "message": "",            "data": {                "execID": ""            }        }]    }}

    task log#

    Interface address: /api/rest_j/v1/entrance/{id}/log

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get task log

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|Task ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Pause task#

    Interface address:/api/rest_j/v1/entrance/{id}/pause

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Pause task

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|Task ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Task progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progress

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task progress

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|Task ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource progress#

    Interface address:/api/rest_j/v1/entrance/{id}/progressWithResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Resource progress

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|ID|path|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    task status#

    Interface address:/api/rest_j/v1/entrance/{id}/status

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Task status

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|id|ID|path|false|string| |
|taskID|taskID|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html index 2ea5c946bef..bc30049c8e4 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html @@ -7,7 +7,7 @@ EC Resource Information Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    EC Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address:/api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|path|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|204|No Content| |
|401|Unauthorized| |
|403|Forbidden| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|query|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html index d4204d2f86c..e1c0b134962 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html @@ -7,7 +7,7 @@ ECM Resource Information Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    ECM Resource Information Management

    ECResourceInfoRestfulApi class

    delete EC info#

Interface address:/api/rest_j/v1/linkisManager/ecinfo/delete/{ticketid}

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Delete EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|path|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|204|No Content| |
|401|Unauthorized| |
|403|Forbidden| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get EC information#

    Interface address: /api/rest_j/v1/linkisManager/ecinfo/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get EC information

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|ticketid|ticketid|query|true|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    ECM resource list#

    Interface address: /api/rest_j/v1/linkisManager/listAllEMs

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Get a detailed list of all ECM resources, which can be queried according to conditions, and query all by default

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|instance|instance name|query|false|string| |
|nodeHealthy|Status, with the following enumeration values: 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable'|query|false|string| |
|owner|Creator|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/listAllEMs",    "status": 0,    "message": "OK",    "data": {        "EMs": [{            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "instance": "",                "serviceInstance": {                    "instance": "",                    "applicationName": ""                },                "serviceName": "",                "featureKey": "",                "empty":            }],            "applicationName": "",            "instance": ":",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": ,                "instance":            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "owner": "",            "runningTasks": null,            "pendingTasks": null,            "succeedTasks": null,            "failedTasks": null,            "maxMemory": ,            "usedMemory": ,            "systemCPUUsed": null,            "systemLeftMemory": ,            "nodeHealthy": "",            "msg": "",            "startTime":        }]    }}

Edit ECM instance#

    Interface address: /api/rest_j/v1/linkisManager/modifyEMInfo

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

Edit or modify an instance under ECM management

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|applicationName|Engine Label|false|String|String| |
|emStatus|Instance status, with the following enumeration values: 'Healthy', 'UnHealthy', 'WARN', 'StockAvailable', 'StockUnavailable'|false|String|String| |
|instance|Engine instance name|false|String|String| |
|labelKey|The key of the label being added, i.e. the key of a map in the labels collection|false|String|String| |
|labels|The update content for the engine instance; a collection of maps|false|List|List| |
|stringValue|The value of the label being added, i.e. the value of a map in the labels collection|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/modifyEMInfo",    "status": 0,    "message": "success",    "data": {}}

    Open engine log#

    Interface address: /api/rest_j/v1/linkisManager/openEngineLog

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Open the engine log, the stdout type engine log is opened by default

    Request example:

{    "applicationName": "",    "emInstance": "",    "instance": "",    "parameters": {        "pageSize": ,        "fromLine": ,        "logType": ""    }}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|applicationName|Engine Label|String|false|String| |
|emInstance|Instance name|String|false|String| |
|fromLine|From Line|String|false|String| |
|instance|Engine instance name|String|false|String| |
|logType|Log type, default stdout type, belonging to parameters|String|false|String| |
|pageSize|Page Size|String|false|String| |
|parameters|Pagination information|Map|false|Map| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": "/api/linkisManager/openEngineLog",    "status": 0,    "message": "OK",    "data": {        "result": {            "logPath": "",            "logs": [""],            "endLine": ,            "rows":        },        "isError": false,        "errorMsg": ""    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html index 7a76b9348e7..1f3ce60c04d 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html @@ -7,7 +7,7 @@ Engine Management | Apache Linkis - + @@ -16,7 +16,7 @@

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|applicationName|The application name; the outermost layer is an array, at the same level as the engineInstance parameter|false|String|String| |
|engineInstance|The engine instance name; the outermost layer is an array, at the same level as the applicationName parameter|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html index 396720793d0..078dbfe5717 100644 --- a/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html +++ b/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html @@ -7,7 +7,7 @@ Resource Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Resource Management

    RMMonitorRest class

    All user resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/allUserResource

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    All user resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|creator|creator|query|false|string| |
|engineType|engineType|query|false|string| |
|page|page|query|false|integer(int32)| |
|size|size|query|false|integer(int32)| |
|username|username|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "total": 34,        "resources": [{            "id": ,            "username": "",            "creator": "",            "engineTypeWithVersion": "",            "resourceType": "",            "maxResource": {                "memory": ,                "cores": ,                "instance":            },            "minResource": {                "memory": ,                "cores": "instance": 0            },            "usedResource": {                "memory": ,                "cores": ,                "instance":            },            "lockedResource": {                "memory": 0,                "cores": 0,                "instance": 0            },            "expectedResource": null,            "leftResource": {                "memory": ,                "cores": ,                "instance":            },            "createTime": ,            "updateTime": ,            "loadResourceStatus": "",            "queueResourceStatus":        }]    }}

    Application List#

    Interface address: /api/rest_j/v1/linkisManager/rm/applicationlist

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get the list of application engines in resource management

    Request example:

    {    userCreator: ""}

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|userCreator|userCreator|query|true|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": ,    "status": ,    "message": "",    "data": {        "applications": [{            "creator": "",            "applicationList": {                "usedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "maxResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "lockedResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "minResource": {                    "memory": ,                    "cores": ,                    "instance":                },                "engineInstances": [{                    "resource": {                        "resourceType": "",                        "maxResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "minResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "usedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "lockedResource": {                            "memory": ,                            "cores": ,                            "instance":                        },                        "expectedResource": null,                        "leftResource": {                            "memory": ,                            "cores": ,                            "instance":                        }                    },                    "engineType": "",                    "owner": "",                    "instance": "",                    "creator": "",                    "startTime": "",                    "status": "",                    "label": ""                }]            }        }]    }}

    EngineType#

    Interface address: /api/rest_j/v1/linkisManager/rm/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    Engine Type

    Request Parameters:

    No

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Engine manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/engines

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Engine Manager

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|false|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue manager#

    Interface address: /api/rest_j/v1/linkisManager/rm/queueresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue Manager

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|true|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    queue#

    Interface address: /api/rest_j/v1/linkisManager/rm/queues

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Queue

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|false|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    reset resources#

    Interface address:/api/rest_j/v1/linkisManager/rm/resetResource

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Reset resources

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|resourceId|resourceId|query|false|integer(int32)| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|204|No Content| |
|401|Unauthorized| |
|403|Forbidden| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Resource information#

    Interface address: /api/rest_j/v1/linkisManager/rm/userresources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Query resource list and detailed resource data such as usage percentage

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|param|param|body|false|object| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {            "userResources": [{            "userCreator": "",            "engineTypes": [{            "engineType": "",            "percent": ""            }],    "percent": ""        }]    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html index d27927b5db0..e28be9ff397 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html @@ -7,7 +7,7 @@ Context History Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Context History Service

    ContextHistoryRestfulApi class

    create history#

    Interface address:/api/rest_j/v1/contextservice/createHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Create History

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextHistory|History context|false|String|String| |
|contextID|context id|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get multiple histories#

    Interface address:/api/rest_j/v1/contextservice/getHistories

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get multiple history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextID|context id|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get history#

    Interface address:/api/rest_j/v1/contextservice/getHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextID|ContextId|false|String|String| |
|source|Context Source|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete history#

    Interface address:/api/rest_j/v1/contextservice/removeHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete history records

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextHistory|History context|false|String|String| |
|contextID|context id|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    search history#

    Interface address:/api/rest_j/v1/contextservice/searchHistory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Search history

    Request Parameters:

|Parameter name|Parameter description|Required|Request type|Data type|schema|
|contextID|ContextId|false|String|String| |
|keywords|Keywords|false|String|String| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html index bb5ec0e34e0..edc09e29fbf 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html @@ -7,7 +7,7 @@ Context Listening Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Context Listening Service

    ContextListenerRestfulApi class

    Context listener service

    heartbeat#

    Interface address:/api/rest_j/v1/contextservice/heartbeat

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindIDListener#

    Interface address:/api/rest_j/v1/contextservice/onBindIDListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    onBindKeyListener#

    Interface address:/api/rest_j/v1/contextservice/onBindKeyListener

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html index 478808a6282..a4258166bdd 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html @@ -7,7 +7,7 @@ Context Logging Service | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Context Logging Service

    ContextIDRestfulApi class

    create text record#

    Interface address: /api/rest_j/v1/contextservice/createContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create text record

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextID|ContextId| |false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get text ID#

    Interface address: /api/rest_j/v1/contextservice/getContextID

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|contextId|ContextId|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete text ID#

    Interface address: /api/rest_j/v1/contextservice/removeContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextId|ContextId| |false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    reset text ID#

    Interface address: /api/rest_j/v1/contextservice/resetContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface Description:

    Reset Text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextId|ContextId| |false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Search text ID by time#

    Interface address:/api/rest_j/v1/contextservice/searchContextIDByTime

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

Search text IDs by time range

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|accessTimeEnd|Access end time|query|false|string| |
|accessTimeStart|Access Start Time|query|false|string| |
|createTimeEnd|Create end time|query|false|string| |
|createTimeStart|Create start time|query|false|string| |
|pageNow|page number|query|false|string| |
|pageSize|page size|query|false|string| |
|updateTimeEnd|Update end time|query|false|string| |
|updateTimeStart|Update start time|query|false|string| |

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Modify text ID#

    Interface address: /api/rest_j/v1/contextservice/updateContextID

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Modify text ID

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|jsonNode|jsonNode|body|true|JsonNode|JsonNode|
|contextId|ContextId| |false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html b/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html index 0873a7f3cb0..c7618774a0d 100644 --- a/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html +++ b/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html @@ -7,7 +7,7 @@ Context API | Apache Linkis - + @@ -33,7 +33,7 @@ |contextKey|contextKey|false|String|String|

    Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html index 07113e0a865..6e9ed235511 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html @@ -7,7 +7,7 @@ BM Project Operation Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    BM Project Operation Management

    BmlProjectRestful class

    Attachment resource item#

    Interface address:/api/rest_j/v1/bml/attachResourceAndProject

Request method: POST

Request data type: application/json

Response data type: */*

    Interface description:

    Attachment resource item

    Request parameters:

|parameter name|parameter description|request type|required|data type|schema|
|projectName|project name|string|false|string| |
|resourceid|resource ID|string|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Create BML project#

    Interface address:/api/rest_j/v1/bml/createBmlProject

Request method: POST

Request data type: application/json

Response data type: */*

    Interface description:

    Create BML project

    Request parameters:

|parameter name|parameter description|request type|required|data type|schema|
|accessusers|access users|string|false|string| |
|editusers|edit users|string|false|string| |
|projectName|project name|string|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object| |
|Message|description|string| |
|Method|request url|string| |
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download shared resources#

    Interface address:/api/rest_j/v1/bml/downloadShareResource

Request method: GET

Request data type: application/x-www-form-urlencoded

Response data type: */*

    Interface description:

    Download shared resources

    Request parameters:

|Parameter name|parameter description|request type|required|data type|schema|
|Resourceid|resource ID|query|false|string| |
|Version|version|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|dataset|object| |
|message|description|string| |
|method|request url|string| |
|status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Project information#

    Interface address:/api/rest_j/v1/bml/getProjectInfo

Request method: GET

Request data type: application/x-www-form-urlencoded

Response data type: */*

    Interface description:

    Project information

    Request parameters:

|Parameter name|parameter description|request type|required|data type|schema|
|ProjectName|project name|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object| |
|Message|description|string| |
|Method|request url|string| |
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update project user#

    Interface address:/api/rest_j/v1/bml/updateProjectUsers

Request method: POST

Request data type: application/json

Response data type: */*

    Interface description:

    Update project users

    Request parameters:

|parameter name|parameter description|required|request type|data type|schema|
|accessusers|access users|false|string|string| |
|editusers|edit users|false|string|string| |
|projectName|project name|false|string|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object| |
|Message|description|string| |
|Method|request url|string| |
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Update shared resources#

    Interface address:/api/rest_j/v1/bml/updateShareResource

Request method: POST

Request data type: multipart/form-data

Response data type: */*

    Interface description:

    Update shared resources

    Request parameters:

|parameter name|parameter description|request type|required|data type|schema|
|file|file|formdata|false|ref| |
|resourceid|resource ID|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object| |
|Message|description|string| |
|Method|request url|string| |
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Upload shared resources#

    Interface address:/api/rest_j/v1/bml/uploadShareResource

Request method: POST

Request data type: application/json

Response data type: */*

    Interface description:

    Upload shared resources

    Request parameters:

|parameter name|parameter description|request type|required|data type|schema|
|expireTime|expiration time|query|false|string| |
|expiretype|expiration type|query|false|string| |
|file|file set|formdata|false|ref| |
|isexpire|whether it expires|query|false|string| |
|maxversion|max version|query|false|ref| |
|projectName|project name|query|false|string| |
|resourceheader|resource header|query|false|string| |
|system|system|query|false|string| |

Response status:

|Status code|description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|Parameter name|parameter description|type|schema|
|Data|dataset|object| |
|Message|description|string| |
|Method|request url|string| |
|Status|status|integer|integer|

    Response example:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html index 5a3231d3011..d2238d317c5 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html @@ -7,7 +7,7 @@ BML Resource Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    BML Resource Management

    BmlRestfulApi class

    update owner#

    Interface address:/api/rest_j/v1/bml/changeOwner

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update owner

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|newOwner|New Owner| |false|String|String|
|oldOwner|Old Owner| |false|String|String|
|resourceId|ResourceId| |false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Copy resources to other users#

    Interface address:/api/rest_j/v1/bml/copyResourceToAnotherUser

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Copy resources to specified user

    Request Parameters:

|Parameter name|Parameter description|Request type|Required|Data type|schema|
|anotherUser|specified user| |false|String|String|
|resourceId|ResourceId| |false|String|String|

Response Status:

|Status code|Description|schema|
|200|OK|Message|
|201|Created| |
|401|Unauthorized| |
|403|Forbidden| |
|404|Not Found| |

Response parameters:

|parameter name|parameter description|type|schema|
|data|Dataset|object| |
|message|Description|string| |
|method|request url|string| |
|status|Status|integer(int32)|integer(int32)|

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete resource#

    Interface address:/api/rest_j/v1/bml/deleteResource

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete resource

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceId | ResourceId | | true | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete multiple resources#

    Interface address:/api/rest_j/v1/bml/deleteResources

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete multiple resources

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceIds | Collection of resource IDs, to delete multiple resources | | true | List | List

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete version#

    Interface address:/api/rest_j/v1/bml/deleteVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete version

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceId | ResourceId | | true | String | String
    version | version | | true | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Download resources#

    Interface address:/api/rest_j/v1/bml/download

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Download the resource specified by the two parameters resourceId and version

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceId | ResourceId | query | false | string |
    version | Resource version; if not specified, defaults to latest | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get Basic#

    Interface address:/api/rest_j/v1/bml/getBasic

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Basic

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceId | ResourceId | query | true | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get resource information#

    Interface address:/api/rest_j/v1/bml/getResourceInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get resource information

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceId | ResourceId | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get resource information#

    Interface address:/api/rest_j/v1/bml/getResources

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get resource information

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    currentPage | page number | query | false | string |
    pageSize | page size | query | false | string |
    system | system | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get version information#

    Interface address: /api/rest_j/v1/bml/getVersions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get bml version information

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    currentPage | page number | query | false | string |
    pageSize | page size | query | false | string |
    resourceId | Resource ID | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rollback version#

    Interface address:/api/rest_j/v1/bml/rollbackVersion

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rollback version

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    resourceId | ResourceId | | false | String | String
    version | Rollback version | | false | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    update resource#

    Interface address:/api/rest_j/v1/bml/updateVersion

    Request method: POST

    Request data type: multipart/form-data

    Response data type: */*

    Interface description:

    Users update resource files through http

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    file | file | formData | true | ref |
    resourceId | resourceId | query | true | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload resources#

    Interface address:/api/rest_j/v1/bml/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload resources

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    file | file | formData | true | array | file
    expireTime | expireTime | query | false | string |
    expireType | expireType | query | false | string |
    isExpire | isExpire | query | false | string |
    maxVersion | maxVersion | query | false | integer(int32) |
    resourceHeader | resourceHeader | query | false | string |
    system | system | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html index 76e94ff33a2..84b1cb1a578 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html @@ -7,7 +7,7 @@ BMLFS Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    BMLFS Management

    BMLFsRestfulApi class

    Open ScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    openScriptFromBML

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    fileName | File name | query | true | string |
    creator | Creator | query | false | string |
    projectName | Project name | query | false | string |
    resourceId | ResourceId | query | false | string |
    version | version | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    -product-openScriptFromBML#

    Interface address:/api/rest_j/v1/filesystem/product/openScriptFromBML

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    /product/openScriptFromBML

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    fileName | File name | query | true | string |
    creator | Creator | query | false | string |
    resourceId | ResourceId | query | false | string |
    version | version | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Save script to BML#

    Interface address:/api/rest_j/v1/filesystem/saveScriptToBML

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save script to BML

    Request Parameters:

    Parameter name | Parameter description | Required | Request type | Data type | schema
    creator | Creator | true | | String | String
    fileName | File name | true | | String | String
    metadata | metadata | true | | String | String
    projectName | Project Name | true | | String | String
    resourceId | Resource ID | true | | String | String
    scriptContent | Content | true | | String | String
    SaveScriptToBML | json | true | body | SaveScriptToBML | SaveScriptToBML

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html index 6724985935a..a7fe7b67651 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html @@ -7,7 +7,7 @@ Ceneric Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Generic Api

    CommonRestfulApi class

    offline#

    Interface address:/api/rest_j/v1/offline

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Offline

    Request Parameters:

    No

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html index f06850cab10..8f53f9cd82b 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html @@ -7,7 +7,7 @@ DataSourceAdminRestfulApi | Apache Linkis - + @@ -20,7 +20,7 @@ Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64) |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/3/connect-params",    "status": 0,    "message": "OK",    "data": {        "connectParams": {            "host": "127.0.0.1",            "password": "xxxxx",            "port": "9600",            "username": "linkis"        }    }}

    getVersionList#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/versions

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64) |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/versions",    "status": 0,    "message": "OK",    "data": {        "versions": [            {                "versionId": 1,                "datasourceId": 1,                "connectParams": {                    "host": "127.0.0.1",                    "password": "xxxxx",                    "port": "9600",                    "username": "linkis"                },                "parameter": "{\"host\":\"127.0.0.1\",\"port\":\"9600\",\"username\":\"linkis\",\"password\": \"rO0ABXQACUFiY2RAMjAyMg==\"}",                "comment": "Initialization Version",                "createUser": "hadoop"            }        ]    }}

    connectDataSource#

    Interface address: /api/rest_j/v1/data-source-manager/{dataSourceId}/{version}/op/connect

    Request method: PUT

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSourceId | dataSourceId | path | true | integer(int64) |
    version | version | path | true | integer(int64) |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/data-source-manager/1/1/op/connect",    "status": 0,    "message": "OK",    "data": {        "ok": true    }}

    data-source-operate-restful-api

    connect#

    Interface address:/api/rest_j/v1/data-source-manager/op/connect/json

    Request method: POST

    Request data type: application/json

    Response data type: application/json

    Interface description:

    Request example:

    {  "connectParams": {},  "createIdentify": "",  "createSystem": "",  "createTime": "",  "createUser": "",  "dataSourceDesc": "",  "dataSourceEnv": {    "connectParams": {},    "createTime": "",    "createUser": "",    "dataSourceType": {      "classifier": "",      "description": "",      "icon": "",      "id": "",      "layers": 0,      "name": "",      "option": ""    },    "dataSourceTypeId": 0,    "envDesc": "",    "envName": "",    "id": 0,    "modifyTime": "",    "modifyUser": ""  },  "dataSourceEnvId": 0,  "dataSourceName": "",  "dataSourceType": {    "classifier": "",    "description": "",    "icon": "",    "id": "",    "layers": 0,    "name": "",    "option": ""  },  "dataSourceTypeId": 0,  "expire": true,  "id": 0,  "labels": "",  "modifyTime": "",  "modifyUser": "",  "publishedVersionId": 0,  "versionId": 0,  "versions": [    {      "comment": "",      "connectParams": {},      "createTime": "",      "createUser": "",      "datasourceId": 0,      "parameter": "",      "versionId": 0    }  ]}

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    dataSource | dataSource | body | true | DataSource | DataSource
      connectParams | | | false | object |
      createIdentify | | | false | string |
      createSystem | | | false | string |
      createTime | | | false | string(date-time) |
      createUser | | | false | string |
      dataSourceDesc | | | false | string |
      dataSourceEnv | | | false | DataSourceEnv | DataSourceEnv
        connectParams | | | false | object |
        createTime | | | false | string |
        createUser | | | false | string |
        dataSourceType | | | false | DataSourceType | DataSourceType
          classifier | | | false | string |
          description | | | false | string |
          icon | | | false | string |
          id | | | false | string |
          layers | | | false | integer |
          name | | | false | string |
          option | | | false | string |
        dataSourceTypeId | | | false | integer |
        envDesc | | | false | string |
        envName | | | false | string |
        id | | | false | integer |
        modifyTime | | | false | string |
        modifyUser | | | false | string |
      dataSourceEnvId | | | false | integer(int64) |
      dataSourceName | | | false | string |
      dataSourceType | | | false | DataSourceType | DataSourceType
        classifier | | | false | string |
        description | | | false | string |
        icon | | | false | string |
        id | | | false | string |
        layers | | | false | integer |
        name | | | false | string |
        option | | | false | string |
      dataSourceTypeId | | | false | integer(int64) |
      expire | | | false | boolean |
      id | | | false | integer(int64) |
      labels | | | false | string |
      modifyTime | | | false | string(date-time) |
      modifyUser | | | false | string |
      publishedVersionId | | | false | integer(int64) |
      versionId | | | false | integer(int64) |
      versions | | | false | array | DatasourceVersion
        comment | | | false | string |
        connectParams | | | false | object |
        createTime | | | false | string |
        createUser | | | false | string |
        datasourceId | | | false | integer |
        parameter | | | false | string |
        versionId | | | false | integer |

    Response parameters:

    parameter name | parameter description | type | schema
    data | | object |
    message | | string |
    method | | string |
    status | | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html index 543fa5f7282..b529bb355a6 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html @@ -7,7 +7,7 @@ Filesystem | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Filesystem

    FsRestfulApi class

    create new Dir#

    Interface address:/api/rest_j/v1/filesystem/createNewDir

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new Dir

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | path | | true | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    create new file#

    Interface address: /api/rest_j/v1/filesystem/createNewFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Create a new file

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | path | | true | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete dir or file#

    Interface address: /api/rest_j/v1/filesystem/deleteDirOrFile

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete dir or file

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | address | | true | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0    }

    download#

    Interface address:/api/rest_j/v1/filesystem/download

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download

    Request Parameters:

    Parameter name | Parameter description | Required | Request type | Data type | schema
    charset | Charset | true | | String | String
    path | address | true | | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    file info#

    Interface address:/api/rest_j/v1/filesystem/fileInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface Description:

    File Information

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | address | query | true | string |
    pageSize | page size | query | false | ref |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    format#

    Interface address:/api/rest_j/v1/filesystem/formate

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    resultsets converted to Excel

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    encoding | encoding | query | true | string |
    escapeQuotes | escapeQuotes | query | true | string |
    fieldDelimiter | field delimiter | query | true | string |
    hasHeader | whether there is a header row | query | true | boolean |
    quote | quote | query | true | string |
    path | address | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    directory file tree#

    Interface address:/api/rest_j/v1/filesystem/getDirFileTrees

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the directory file tree

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | request path | query | true | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/getDirFileTrees",    "status": 0,    "message": "OK",    "data": {        "dirFileTrees": {            "name": "",            "path": "",            "properties": null,            "children": [{                "name": "",                "path": "",                "properties": {                    "size": "",                    "modifytime": ""                },                "children": ,                "isLeaf": ,                "parentPath": ""            }],            "isLeaf": ,            "parentPath":        }    }}

    root path#

    Interface address:/api/rest_j/v1/filesystem/getUserRootPath

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get root path

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    pathType | FileType | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    does it exist#

    Interface address: /api/rest_j/v1/filesystem/isExist

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Whether it exists

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | address | query | true | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    open a file#

    Interface address: /api/rest_j/v1/filesystem/openFile

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Open file

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | address | query | true | string |
    charset | Charset | query | false | string |
    page | page number | query | false | ref |
    pageSize | page size | query | false | ref |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": [{            "columnName": "_c0",            "comment": "NULL",            "dataType": ""        }],        "totalPage": ,        "totalLine": ,        "page": ,        "type": "",        "fileContent": [            [""]        ]    }}

    Open log#

    Interface address:/api/rest_j/v1/filesystem/openLog

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Open the log

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    path | address | query | false | string |
    proxyUser | Proxy User | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/filesystem/openLog",    "status": 0,    "message": "OK",    "data": {        "log": ["", ""]    }}

    Rename#

    Interface address:/api/rest_j/v1/filesystem/rename

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Rename the file

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    newDest | new name | | false | String | String
    oldDest | old name | | false | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert the result set to Excel#

    Interface address: /api/rest_j/v1/filesystem/resultsetToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Convert the result set to Excel

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    autoFormat | Auto | query | false | boolean |
    charset | charset | query | false | string |
    csvSeerator | csv separator | query | false | string |
    limit | limit | query | false | ref |
    nullValue | null value | query | false | string |
    outputFileName | Output file name | query | false | string |
    outputFileType | Output file type | query | false | string |
    path | address | query | false | string |
    quoteRetouchEnable | Whether to enable quote retouching | query | false | boolean |
    sheetName | sheet name | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Convert resultsets to Excel#

    Interface address:/api/rest_j/v1/filesystem/resultsetsToExcel

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    resultsets converted to Excel

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    autoFormat | Auto | query | true | boolean |
    limit | limit | query | true | ref |
    nullValue | null value | query | true | string |
    outputFileName | Output file name | query | true | string |
    path | address | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save the script#

    Interface address:/api/rest_j/v1/filesystem/saveScript

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save script

    Request Parameters:

    Parameter name | Parameter description | Required | Request type | Data type | schema
    path | address | true | | String | String
    SaveScript | json | true | body | SaveScript | SaveScript
    charset | Charset | false | | String | String
    params | parameters | false | | Object | Object
    scriptContent | script content | false | | String | String

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    upload#

    Interface address:/api/rest_j/v1/filesystem/upload

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Upload files, multiple files can be uploaded

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    file | file | formData | false | ref |
    path | address | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html index 6ec00c70353..52bf820ae3d 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html @@ -7,7 +7,7 @@ Add Global Variable | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Add Global Variable

    VariableRestfulApi class

    add global variables#

    Interface address:/api/rest_j/v1/variable/saveGlobalVariable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add global variables

    Request example:

    {    globalVariables: [{        keyID: ,        key: "",        valueID: ,        value: ""    }]}

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    globalVariables | Added parameter data, one-to-many; key: globalVariables, value: List | Map | true | Map |
    key | Parameter name, belonging to globalVariables | String | true | String |
    value | Variable value, paired with key inside globalVariables | List | true | List |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/variable/saveGlobalVariable",    "status": 0,    "message": "OK",    "data": {}}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html index 85300a1ab9f..b58f38bd3cf 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html @@ -7,7 +7,7 @@ Admin Console Home Page Interface | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Admin Console Home Page Interface

    QueryRestfulApi class

    admin authentication#

    Interface address:/api/rest_j/v1/jobhistory/governanceStationAdmin

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Used to verify whether the current user is an administrator; returns true if so, otherwise false

    Request Parameters:

    No

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {        "solution": null,        "admin": true    },    "message": "OK",    "method": "/api/jobhistory/governanceStationAdmin",    "status": 0}

    global history#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the global history list filtered by the given conditions; returns all by default

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    creator | Creator | query | false | string |
    endDate | End time | query | false | integer(int64) |
    executeApplicationName | operator | query | false | string |
    isAdminView | Whether it is administrator mode or normal mode | query | false | boolean |
    pageSize | page size | query | false | ref |
    proxyUser | Proxy User | query | false | string |
    startDate | Start time | query | false | integer(int64) |
    status | status | query | false | string |
    taskID | ID | query | false | integer(int64) |
    pageNow | page number | query | false | integer(int32) |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/jobhistory/list",    "status": 0,    "message": "OK",    "data": {        "solution": null,        "totalPage": 90,        "tasks": [{            "taskID": ,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": null,            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime": ,            "updatedTime": ,            "engineType": "",            "errCode": 0,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "datachecker",            "paramsJson": "",            "costTime": 1000,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [],            "canRetry": ,            "subJobs":        }]    }}
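
    A minimal Python sketch of a filtered query (gateway address, authenticated session, and the filter values are assumptions):

    import requests

    BASE = "http://127.0.0.1:9001"  # assumed Linkis gateway address
    session = requests.Session()    # assumed to already carry a valid login cookie

    resp = session.get(
        BASE + "/api/rest_j/v1/jobhistory/list",
        params={"pageNow": 1, "pageSize": 20, "status": "FAILED"},
    )
    data = resp.json()["data"]
    print(data["totalPage"], [t["taskID"] for t in data["tasks"]])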

    list undone#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    List of unfinished tasks

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    creator | creator | query | false | string |
    endDate | End time | query | false | integer(int64) |
    engineType | engineType | query | false | string |
    pageNow | pageNow | query | false | ref |
    pageSize | pageSize | query | false | ref |
    startDate | Start time | query | false | ref |
    startTaskID | startTaskID | query | false | integer(int64) |
    status | status | query | false | string |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    History details#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the detailed information of a history record by its ID

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    id | HistoryId | query | false | integer(int64) |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/jobhistory/1928730/get",    "status": 0,    "message": "OK",    "data": {        "task": {            "taskID": ,            "instance": "",            "execId": "",            "umUser": "",            "engineInstance": "",            "executionCode": "",            "progress": "",            "logPath": "",            "resultLocation": "",            "status": "",            "createdTime":,            "updatedTime": ,            "engineType": "",            "errCode": ,            "errDesc": "",            "executeApplicationName": "",            "requestApplicationName": "",            "runType": "hql",            "paramsJson": "",            "costTime": ,            "strongerExecId": "",            "sourceJson": "",            "sourceTailor": "",            "engineStartTime": null,            "labels": [""],            "canRetry": false,            "subJobs": null        }    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html index 567a69446e3..96e541856f1 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html @@ -7,7 +7,7 @@ Instance Management | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Instance Management

    InstanceRestful class

    Microservice instance list#

    Interface address: /api/rest_j/v1/microservice/allInstance

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the list of microservice management module instances; one or more can be fetched, all by default

    Request Parameters:

    No

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "instances": [{            "id": ,            "updateTime": ,            "createTime": ,            "applicationName": ",            "instance": "",            "labels": [{                "stringValue": "",                "labelKey": "",                "feature": "",                "id": 5,                "labelValueSize": 0,                "modifiable": true,                "updateTime": ,                "createTime": ,                "featureKey": "",                "empty":            }]        }]    }}

    Get eurekaURL#

    Interface address: /api/rest_j/v1/microservice/eurekaURL

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    return eurekaURL

    Request Parameters:

    No

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "url": ""    }}

    Edit the microservice instance#

    Interface address: /api/rest_j/v1/microservice/instanceLabel

    Request method: PUT

    Request data type: application/json

    Response data type: */*

    Interface description:

    Edit or modify the instance in microservice management

    Request example:

    {    "applicationName": "linkis-ps-cs",    "instance": "bdpdws110004:9108",    "labels": [{        "labelKey": "route",        "stringValue": "cs_2_dev"    }]}

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    applicationName | Engine label | String | false | String |
    instance | Engine instance name | String | false | String |
    labelKey | Key of a label entry in the labels collection map | String | false | String |
    labels | The label content to update for the instance; a List whose elements are maps | List | false | List |
    stringValue | Value of a label entry in the labels collection map | String | false | String |

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    201 | Created |
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "success",    "data": {        "labels": [{            "stringValue": "",            "labelKey": "",            "feature": null,            "modifiable": ,            "featureKey": "",            "empty":        }]    }}

    Modifiable label types#

    Interface address:/api/rest_j/v1/microservice/modifiableLabelKey

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of label types that can be modified, such as 'userCreator, route'

    Request Parameters:

    No

    Response Status:

    Status code | Description | schema
    200 | OK | Message
    401 | Unauthorized |
    403 | Forbidden |
    404 | Not Found |

    Response parameters:

    parameter name | parameter description | type | schema
    data | Dataset | object |
    message | Description | string |
    method | request url | string |
    status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {    "keyList": []    }}
    - + \ No newline at end of file diff --git a/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html index 0fd1ecc4a41..2e892bfc6ae 100644 --- a/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html +++ b/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html @@ -7,7 +7,7 @@ History Job Interface | Apache Linkis - + @@ -16,7 +16,7 @@ none

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "admin": true    }}

    getHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/{id}/get

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get a history task by its ID

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    id | id | path | true | string |

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    listHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/list

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    startDate | startDate | path | false | Long |
    endDate | endDate | path | false | Long |
    status | status | path | false | string |
    pageNow | pageNow | path | false | Integer |
    pageSize | pageSize | path | false | Integer |
    taskID | taskID | path | false | Long |
    executeApplicationName | executeApplicationName | path | false | string |
    creator | creator | path | false | string |
    proxyUser | proxyUser | path | false | string |
    isAdminView | isAdminView | path | false | Boolean |

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}

    listUndoneHistoryTask#

    Interface address:/api/rest_j/v1/jobhistory/listundone

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

    Parameter name | Parameter description | Request type | Required | Data type | schema
    startDate | startDate | path | false | Long |
    endDate | endDate | path | false | Long |
    status | status | path | false | string |
    pageNow | pageNow | path | false | Integer |
    pageSize | pageSize | path | false | Integer |
    startTaskID | startTaskID | path | false | Long |
    engineType | engineType | path | false | string |
    creator | creator | path | false | string |

    Sample Response:

    {    "method": null,        "status": 0,        "message": "OK",        "data": {            "tasks": [{                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "Running",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            },            {                "taskID": 2,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "Running",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"            }],            "totalPage": 1    }}
    Version: 1.1.2

    Linkis Error Codes

    LinkisErrorCodeRestful class

    Get Linkis error code#

    Interface address:/api/rest_j/v1/errorcode/getAllErrorCodes

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get Linkis error code list

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    Mdq Table Interface

    MdqTableRestfulApi class

    Activate table operations#

    Interface address:/api/rest_j/v1/datasource/active

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Activate table operation

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
tableId | Table ID | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Generate the DDL statement for the new library table#

    Interface address:/api/rest_j/v1/datasource/displaysql

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Generate DDL statement for new library table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
table | Table | String | false | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get partition statistics#

    Interface address:/api/rest_j/v1/datasource/getPartitionStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get partition statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
partitionSort | Partition Sort | String | false | String
tableName | table name | query | false | string
partitionPath | partitionPath | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table information#

    Interface address:/api/rest_j/v1/datasource/getTableBaseInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table field information#

    Interface address:/api/rest_j/v1/datasource/getTableFieldsInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table field information

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Get table statistics#

    Interface address:/api/rest_j/v1/datasource/getTableStatisticInfo

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get table statistics

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
database | Datasource | query | false | string
pageNow | page number | query | false | string
pageSize | page size | query | false | string
partitionSort | Partition Sort | query | false | string
tableName | table name | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

Persist table#

    Interface address:/api/rest_j/v1/datasource/persistTable

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Persist table information

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
table | Table | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}
    Version: 1.1.2

    MetadataCoreRestful

    getColumns#

    Interface address: /api/rest_j/v1/metadatamanager/columns/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the column information of the data table

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "columns": [            {                "index": 1,                "primaryKey": true,                "name": "id",                "type": "INT"            },            {                "index": 2,                "primaryKey": false,                "name": "datasource_name",                "type": "VARCHAR"            },            {                "index": 3,                "primaryKey": false,                "name": "datasource_desc",                "type": "VARCHAR"            },            {                "index": 4,                "primaryKey": false,                "name": "datasource_type_id",                "type": "INT"            },            {                "index": 5,                "primaryKey": false,                "name": "create_identify",                "type": "VARCHAR"            },            {                "index": 6,                "primaryKey": false,                "name": "create_system",                "type": "VARCHAR"            },            {                "index": 7,                "primaryKey": false,                "name": "parameter",                "type": "VARCHAR"            },            {                "index": 8,                "primaryKey": false,                "name": "create_time",                "type": "DATETIME"            },            {                "index": 9,                "primaryKey": false,                "name": "modify_time",                "type": "DATETIME"            },            {                "index": 10,                "primaryKey": false,                "name": "create_user",                "type": "VARCHAR"            },            {                "index": 11,                "primaryKey": false,                "name": "modify_user",                "type": "VARCHAR"            },            {                "index": 12,                "primaryKey": false,                "name": "labels",                "type": "VARCHAR"            },            {                "index": 13,                "primaryKey": false,                "name": "version_id",                "type": "INT"            },            {                "index": 14,                "primaryKey": false,                "name": "expire",                "type": "TINYINT"            },            {                "index": 15,                "primaryKey": false,                "name": "published_version_id",                "type": "INT"            }        ]    }}

    getDatabases#

    Interface address:/api/rest_j/v1/metadatamanager/dbs/{dataSourceId}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description: Get the list of database names of the data source

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
system | system | query | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "dbs": [            "information_schema",            "linkis",            "linkis_sit"        ]    }}

    getPartitions#

    Interface address:/api/rest_j/v1/metadatamanager/partitions/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string
traverse | traverse | query | false | boolean

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "partKeys": [                "ds"            ],            "root": {}        }    }}

    getTableProps#

    Interface address:/api/rest_j/v1/metadatamanager/props/{dataSourceId}/db/{database}/table/{table}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: application/json

    Interface description:

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
dataSourceId | dataSourceId | path | true | string
database | database | path | true | string
system | system | query | true | string
table | table | path | true | string

    Sample Response:

    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "props": {            "skip.header.line.count": "1",            "columns.types": "int:int:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string:string",            "columns": "id,age,job,marital,education,default,balance,housing,loan,contact,day,month,duration,campaign,pdays,previous,poutcome,y",            "field.delim": ",",            "transient_lastDdlTime": "1646732554",            "partition_columns.types": "string",            "columns.comments": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000",            "bucket_count": "-1",            "serialization.ddl": "struct demo_data { i32 id, i32 age, string job, string marital, string education, string default, string balance, string housing, string loan, string contact, string day, string month, string duration, string campaign, string pdays, string previous, string poutcome, string y}",            "file.outputformat": "org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat",            "partition_columns": "ds",            "colelction.delim": "-",            "serialization.lib": "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe",            "name": "dss_autotest.demo_data",            "location": "hdfs://bdpdev01/user/hive/warehouse/hadoop/dss_autotest.db/demo_data",            "mapkey.delim": ":",            "file.inputformat": "org.apache.hadoop.mapred.TextInputFormat",            "serialization.format": ",",            "column.name.delimiter": ","        }    }}
    Version: 1.1.2

    Parameter Configuration

    ConfigurationRestfulApi class

    Add KeyForEngine#

    Interface address:/api/rest_j/v1/configuration/addKeyForEngine

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Add KeyForEngine

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
engineType | engineType | query | false | string
keyJson | keyJson | query | false | string
token | token | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Add application type#

    Interface address:/api/rest_j/v1/configuration/createFirstCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add application type tag

    Request example:

    {    "categoryName": "",    "description": ""}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryName | Reference type label name | false | String | String
description | Description | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createFirstCategory",    "status": 0,    "message": "OK",    "data": {}}

    Add parameter configuration#

    Interface address:/api/rest_j/v1/configuration/createSecondCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Add parameter configuration

    Request example:

{
    "categoryId": ,
    "description": "",
    "engineType": "",
    "version": ""
}

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
categoryId | Parameter Configuration Id | true | String | String
description | Description | true | String | String
engineType | Engine Type | true | String | String
version | version number | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/createSecondCategory",    "status": 0,    "message": "OK",    "data": {}}

    delete configuration#

    Interface address: /api/rest_j/v1/configuration/deleteCategory

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete parameter configuration

    Request example:

{
    "categoryId":
}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | Parameter Configuration Id | String | true | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/deleteCategory",    "status": 0,    "message": "OK",    "data": {}}

    Engine type list#

    Interface address:/api/rest_j/v1/configuration/engineType

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get a list of engine types

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/engineType",    "status": 0,    "message": "OK",    "data": {    "engineType": []    }}

    App types#

    Interface address: /api/rest_j/v1/configuration/getCategory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Apply type tag in parameter configuration

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/getCategory",    "status": 0,    "message": "OK",    "data": {        "Category": [{            "categoryId": ,            "labelId": ,            "categoryName": "",            "childCategory": [],            "description": null,            "tag": null,            "createTime": ,            "updateTime": ,            "level": ,            "fatherCategoryName": ""        }],        "description": null,        "tag": null,        "createTime": ,        "updateTime": ,        "level": ,        "fatherCategoryName":    }]}}

    queue resources#

    Interface address:/api/rest_j/v1/configuration/getFullTreesByAppName

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    The queue resource module in the parameter configuration returns the column and value of the queue resource

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | label name | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/getFullTreesByAppName",    "status": 0,    "message": "OK",    "data": {        "fullTree": [{            "name": "Queue Resource",            "description": null,            "settings": [{                "id": ,                "key": "",                "description": "",                "name": "",                "defaultValue": "",                "validateType": "",                "validateRange": "[]",                "level": 1,                "engineType": ,                "treeName": "",                "valueId": ,                "configValue": "",                "configLabelId": ,                "unit": null,                "isUserDefined": ,                "hidden": ,                "advanced":            }]        }]    }}

    Get key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get key value

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
configKey | configKey | query | true | string
creator | creator | query | false | string
engineType | engineType | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    save key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
configValue | configValue | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String
SaveKeyValue | json body | true | SaveKeyValue | SaveKeyValue
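
For reference, a request body assembled from the parameters above might look like the following (an illustrative sketch only; the key and values are placeholders, not taken from the official docs):

{
    "configKey": "wds.linkis.rm.instance",
    "configValue": "10",
    "creator": "IDE",
    "engineType": "spark",
    "version": "2.4.3"
}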

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    delete key value#

    Interface address:/api/rest_j/v1/configuration/keyvalue

    Request method: DELETE

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Delete key value

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
configKey | configKey | true | String | String
creator | creator | true | String | String
engineType | engineType | true | String | String
version | version | true | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
204 | No Content
401 | Unauthorized
403 | Forbidden

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    rpc test#

    Interface address: /api/rest_j/v1/configuration/rpcTest

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    rpc test

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | creator | query | false | string
engineType | engineType | query | false | string
username | username | query | false | string
version | version | query | false | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "data": {},    "message": "",    "method": "",    "status": 0}

    Save queue resources#

    Interface address:/api/rest_j/v1/configuration/saveFullTree

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Save queue resources

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
creator | App Type Name | String | true | String
description | Description, belonging to the content in fullTree | String | true | String
engineType | Engine Type | String | true | String
fullTree | Details under Application Type | List | true | List
name | Queue resource name, which belongs to the content in fullTree | String | true | String
settings | Detailed content in the queue resource, belonging to the content in fullTree | List | true | List

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/saveFullTree",    "status": 0,    "message": "OK",    "data": {}}

    Update category information#

    Interface address: /api/rest_j/v1/configuration/updateCategoryInfo

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Update category information

Request example:

{
    "description": "",
    "categoryId":
}

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
categoryId | categoryId | String | true | String
description | description | String | true | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {    "method": "/api/configuration/updateCategoryInfo",    "status": 0,    "message": "OK",    "data": {}}
    Version: 1.1.2

    UDF Operations Management

    UDFApi class

    new#

    Interface address:/api/rest_j/v1/udf/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a new UDF

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
clusterName | clusterName | false | String | String
createTime | CreateTime | false | Date | Date
createUser | Creator | false | String | String
description | Description | false | String | String
directory | Category, personal function first-level directory | false | String | String
isExpire | is invalid | false | Boolean | Boolean
isLoad | Whether to load | false | Boolean | Boolean
isShared | Shared | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
sys | sys | false | String | String
treeId | treeId | false | Long | Long
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
updateTime | Update time | false | Date | Date
useFormat | Use Format | false | String | String
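
To make the field list above concrete, a request body for adding a simple UDF might look like the following (an illustrative sketch only; every value is a placeholder, not an official example):

{
    "udfName": "concat_ws_demo",
    "udfType": 0,
    "description": "demo udf",
    "path": "file:///tmp/hadoop/udf_demo.jar",
    "registerFormat": "create temporary function concat_ws_demo as 'com.example.ConcatWsDemo'",
    "useFormat": "concat_ws_demo(col1, col2)",
    "directory": "default",
    "sys": "IDE",
    "clusterName": "all",
    "isShared": false,
    "isLoad": true
}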

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf tree menu#

    Interface address:/api/rest_j/v1/udf/all

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Get detailed information of udf tree menu

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
path | Request Path | false | String | String
jsonString | jsonString | false | string | string

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get udf user list#

    Interface address:/api/rest_j/v1/udf/allUdfUsers

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get udf user list

    Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

authenticate#

    Interface address: /api/rest_j/v1/udf/authenticate

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Authenticate the request

    Request Parameters:

    No

    Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    delete#

    Interface address:/api/rest_j/v1/udf/delete/{id}

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    udf file download to local#

    Interface address:/api/rest_j/v1/udf/downloadToLocal

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    Download UDF file to local according to version parameters

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF View source code#

    Interface address:/api/rest_j/v1/udf/downloadUdf

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF view source code

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Publish#

    Interface address:/api/rest_j/v1/udf/publish

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Publish a UDF version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

rollback version#

    Interface address:/api/rest_j/v1/udf/rollback

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Roll back to a specified version

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer
version | version | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    set expiration#

    Interface address:/api/rest_j/v1/udf/setExpire

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Set the UDF as expired

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | Long | Long

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    UDF sharing#

    Interface address: /api/rest_j/v1/udf/shareUDF

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF sharing

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
sharedUsers | sharedUsers | false | List | List
udfInfo | udfInfo | false | UDFInfo | UDFInfo

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree new#

    Interface address:/api/rest_j/v1/udf/tree/add

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

Add a tree node

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree delete#

    Interface address:/api/rest_j/v1/udf/tree/delete/{id}

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    tree delete

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
id | id | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    tree update#

    Interface address:/api/rest_j/v1/udf/tree/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    tree update

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | category | false | String | String
childrens | childrens | false | List | List
clusterName | clusterName | false | String | String
createTime | createTime | false | Date | Date
description | description | false | String | String
id | id | false | Long | Long
name | name | false | String | String
parent | parent | false | Long | Long
udfInfos | udfInfos | false | List | List
updateTime | updateTime | false | Date | Date
userName | userName | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

update#

    Interface address:/api/rest_j/v1/udf/update

    Request method: POST

    Request data type: application/json

    Response data type: */*

    Interface description:

    UDF modification

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
description | Description | false | String | String
id | id | false | Long | Long
isLoad | Whether to load | false | Boolean | Boolean
path | Only store the last uploaded path of the user for prompting | false | String | String
registerFormat | register execution address | false | String | String
udfName | udfName | false | String | String
udfType | udfType | false | Integer | Integer
useFormat | Use Format | false | String | String

Response Status:

Status code | Description | schema
200 | OK | Message
201 | Created
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    Get user directory#

    Interface address: /api/rest_j/v1/udf/userDirectory

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    Get the first-level classification of the user's personal function

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
category | Get the user directory of the specified collection type, if the type is UDF, get the user directory under this type | false | string | string

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}

    version list#

    Interface address:/api/rest_j/v1/udf/versionList

    Request method: GET

    Request data type: application/x-www-form-urlencoded

    Response data type: */*

    Interface description:

    View version list

    Request Parameters:

Parameter name | Parameter description | Required | Request type | Data type | schema
udfId | udfId | false | integer | integer(int64)

Response Status:

Status code | Description | schema
200 | OK | Message
401 | Unauthorized
403 | Forbidden
404 | Not Found

Response parameters:

parameter name | parameter description | type | schema
data | Dataset | object
message | Description | string
method | request url | string
status | Status | integer(int32) | integer(int32)

    Sample Response:

    {"data": {},"message": "","method": "","status": 0}
Task Submission And Execution Of JDBC API

    //3. Create statement and execute query
    Statement st = connection.createStatement();
    ResultSet rs = st.executeQuery("show tables");
    //4. Process the returned results of the database (using the ResultSet class)
    while (rs.next()) {
        ResultSetMetaData metaData = rs.getMetaData();
        for (int i = 1; i <= metaData.getColumnCount(); i++) {
            System.out.print(metaData.getColumnName(i) + ":" + metaData.getColumnTypeName(i) + ": " + rs.getObject(i) + "    ");
        }
        System.out.println();
    }
    // close resources
    rs.close();
    st.close();
    connection.close();
    Version: 1.1.2

    Linkis Task submission and execution Rest API document

• Every Linkis Restful interface returns responses in the following standard format:
    {  "method": "",  "status": 0,  "message": "",  "data": {}}

    Convention:

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
• status: returns status information, where -1 means not logged in, 0 means success, 1 means error, 2 means verification failed, and 3 means no access to the interface.
• data: returns the specific data.
• message: returns the prompt message for the request. If the status is not 0, the message is an error message, and data may contain a stack field with the specific stack information.

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1. Submit task#

    • Interface /api/rest_j/v1/entrance/submit

    • Submission method POST

    • Request Parameters

    {  "executionContent": {    "code": "show tables",    "runType": "sql"  },  "params": {    "variable": {// task variable       "testvar": "hello"     },    "configuration": {      "runtime": {// task runtime params         "jdbc.url": "XX"      },      "startup": { // ec start up params         "spark.executor.cores": "4"      }    }  },  "source": { //task source information    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "spark-2.4.3",    "userCreator": "hadoop-IDE"  }}

• Sample Response

    { "method": "/api/rest_j/v1/entrance/submit", "status": 0, "message": "Request executed successfully", "data": {   "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",   "taskID": "123" }}
• execID is the unique execution ID generated for the task after it is submitted to Linkis. It is a String and is only useful while the task is running, similar to the concept of a PID. The design of ExecID is (requestApplicationName length)(executeAppName length)(Instance length)${requestApplicationName}${executeApplicationName}${entranceInstance information ip+port}${requestApplicationName}_${umUser}_${index}

• taskID is the unique ID representing the task submitted by the user. It is generated by database auto-increment and is of type Long
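
For illustration, a minimal Java sketch of submitting a task over this interface (assuming an authenticated session cookie and a gateway at http://127.0.0.1:9001; both are assumptions, not official values):

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class SubmitTask {
        public static void main(String[] args) throws Exception {
            String gateway = "http://127.0.0.1:9001"; // assumed gateway address
            String body = "{\"executionContent\": {\"code\": \"show tables\", \"runType\": \"sql\"},"
                    + " \"params\": {\"variable\": {}, \"configuration\": {}},"
                    + " \"labels\": {\"engineType\": \"spark-2.4.3\", \"userCreator\": \"hadoop-IDE\"}}";
            HttpRequest request = HttpRequest.newBuilder()
                    .uri(URI.create(gateway + "/api/rest_j/v1/entrance/submit"))
                    .header("Content-Type", "application/json")
                    .header("Cookie", "<your-session-cookie>")
                    .POST(HttpRequest.BodyPublishers.ofString(body))
                    .build();
            HttpResponse<String> response = HttpClient.newHttpClient()
                    .send(request, HttpResponse.BodyHandlers.ofString());
            // On success, data.execID and data.taskID identify the submitted task
            System.out.println(response.body());
        }
    }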

    2. Get Status#

    • Interface /api/rest_j/v1/entrance/${execID}/status

    • Submission method GET

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/status", "status": 0, "message": "Get status successful", "data": {   "execID": "${execID}",   "status": "Running" }}

    3. Get Logs#

    • Interface /api/rest_j/v1/entrance/${execID}/log?fromLine=${fromLine}&size=${size}

    • Submission method GET

• The request parameter fromLine specifies the line to start reading from, and size specifies the maximum number of log lines this request returns

• Sample Response; the returned fromLine must be used as the fromLine parameter of the next request to this interface

    {  "method": "/api/rest_j/v1/entrance/${execID}/log",  "status": 0,  "message": "Return log information",  "data": {    "execID": "${execID}",  "log": ["error log","warn log","info log", "all log"],  "fromLine": 56  }}

    4. Get Progress and resource#

    • Interface /api/rest_j/v1/entrance/${execID}/progressWithResource

    • Submission method GET

    • Sample Response

    {  "method": "/api/entrance/exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2/progressWithResource",  "status": 0,  "message": "OK",  "data": {    "yarnMetrics": {      "yarnResource": [        {          "queueMemory": 9663676416,          "queueCores": 6,          "queueInstances": 0,          "jobStatus": "COMPLETED",          "applicationId": "application_1655364300926_69504",          "queue": "default"        }      ],      "memoryPercent": 0.009,      "memoryRGB": "green",      "coreRGB": "green",      "corePercent": 0.02    },    "progress": 0.5,    "progressInfo": [      {        "succeedTasks": 4,        "failedTasks": 0,        "id": "jobId-1(linkis-spark-mix-code-1946915)",        "totalTasks": 6,        "runningTasks": 0      }    ],    "execID": "exec_id018017linkis-cg-entrance127.0.0.1:9205IDE_hadoop_spark_2"  }}

    5. Kill Task#

    • Interface /api/rest_j/v1/entrance/${execID}/kill

    • Submission method POST

    • Sample Response

    { "method": "/api/rest_j/v1/entrance/{execID}/kill", "status": 0, "message": "OK", "data": {   "execID":"${execID}"  }}

    6. Get task info#

    • Interface /api/rest_j/v1/jobhistory/{id}/get

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
id | task id | path | true | string
    • Sample Response
    {    "method": null,    "status": 0,    "message": "OK",    "data": {        "task": {                "taskID": 1,                "instance": "xxx",                "execId": "exec-id-xxx",                "umUser": "test",                "engineInstance": "xxx",                "progress": "10%",                "logPath": "hdfs://xxx/xxx/xxx",                "resultLocation": "hdfs://xxx/xxx/xxx",                "status": "FAILED",                "createdTime": "2019-01-01 00:00:00",                "updatedTime": "2019-01-01 01:00:00",                "engineType": "spark",                "errorCode": 100,                "errDesc": "Task Failed with error code 100",                "executeApplicationName": "hello world",                "requestApplicationName": "hello world",                "runType": "xxx",                "paramJson": "{\"xxx\":\"xxx\"}",                "costTime": 10000,                "strongerExecId": "execId-xxx",                "sourceJson": "{\"xxx\":\"xxx\"}"        }    }}

    7. Get result set info#

    Support for multiple result sets

    • Interface /api/rest_j/v1/filesystem/getDirFileTrees

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | result directory | query | true | string
    • Sample Response
    {  "method": "/api/filesystem/getDirFileTrees",  "status": 0,  "message": "OK",  "data": {    "dirFileTrees": {      "name": "1946923",      "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923",      "properties": null,      "children": [        {          "name": "_0.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_0.dolphin",//result set 1          "properties": {            "size": "7900",            "modifytime": "1657113288360"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        },        {          "name": "_1.dolphin",          "path": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923/_1.dolphin",//result set 2          "properties": {            "size": "7900",            "modifytime": "1657113288614"          },          "children": null,          "isLeaf": true,          "parentPath": "hdfs:///tmp/hadoop/linkis/2022-07-06/211446/IDE/1946923"        }      ],      "isLeaf": false,      "parentPath": null    }  }}

    8. Get result content#

    • Interface /api/rest_j/v1/filesystem/openFile

    • Submission method GET

    Request Parameters:

Parameter name | Parameter description | Request type | Required | Data type | schema
path | result path | query | true | string
charset | Charset | query | false | string
page | page number | query | false | ref
pageSize | page size | query | false | ref
    • Sample Response
    {  "method": "/api/filesystem/openFile",  "status": 0,  "message": "OK",  "data": {    "metadata": [      {        "columnName": "count(1)",        "comment": "NULL",        "dataType": "long"      }    ],    "totalPage": 0,    "totalLine": 1,    "page": 1,    "type": "2",    "fileContent": [      [        "28"      ]    ]  }}

    9. Get Result by stream#

    Get the result as a CSV or Excel file

    • Interface /api/rest_j/v1/filesystem/resultsetToExcel

    • Submission method GET

    Request Parameters:

    Parameter name     | Parameter description             | Request type | Required | Data type | schema
    autoFormat         | auto format                       | query        | false    | boolean   |
    charset            | charset                           | query        | false    | string    |
    csvSeperator       | csv separator                     | query        | false    | string    |
    limit              | row limit                         | query        | false    | ref       |
    nullValue          | null value                        | query        | false    | string    |
    outputFileName     | output file name                  | query        | false    | string    |
    outputFileType     | output file type, csv or excel    | query        | false    | string    |
    path               | result path                       | query        | false    | string    |
    quoteRetouchEnable | whether to apply quote retouching | query        | false    | boolean   |
    sheetName          | sheet name                        | query        | false    | string    |
    • Response
    binary stream
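    Because the response is a binary stream, the file should be downloaded chunk by chunk rather than parsed as JSON. A sketch under the same gateway/session assumptions as the earlier examples:

        def download_result_as_csv(session, result_path: str, out_file: str) -> None:
            """Stream a result set to a local CSV file via resultsetToExcel."""
            with session.get(
                f"{GATEWAY}/api/rest_j/v1/filesystem/resultsetToExcel",
                params={
                    "path": result_path,
                    "outputFileType": "csv",   # or "excel"
                    "outputFileName": "result",
                    "charset": "utf-8",
                },
                stream=True,
            ) as resp:
                resp.raise_for_status()
                with open(out_file, "wb") as f:
                    for chunk in resp.iter_content(chunk_size=8192):
                        f.write(chunk)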

    10. Compatible with 0.x task submission interface#

    • Interface /api/rest_j/v1/entrance/execute

    • Submission method POST

    • Request Parameters
    {    "executeApplicationName": "hive", //Engine type    "requestApplicationName": "dss", //Client service type    "executionCode": "show tables",    "params": {      "variable": {// task variable         "testvar": "hello"      },      "configuration": {        "runtime": {// task runtime params           "jdbc.url": "XX"        },        "startup": { // ec start up params           "spark.executor.cores": "4"        }      }    },    "source": { //task source information      "scriptPath": "file:///tmp/hadoop/test.sql"    },    "labels": {      "engineType": "spark-2.4.3",      "userCreator": "hadoop-IDE"    },    "runType": "hql", //The type of script to run    "source": {"scriptPath":"file:///tmp/hadoop/1.hql"}}
    • Sample Response
    {  "method": "/api/rest_j/v1/entrance/execute",  "status": 0,  "message": "Request executed successfully",  "data": {    "execID": "030418IDEhivebdpdwc010004:10087IDE_hadoop_21",    "taskID": "123"  }}
    - + \ No newline at end of file diff --git a/docs/latest/api/login_api/index.html b/docs/latest/api/login_api/index.html index b0f3a8ff128..c9b0177c7f1 100644 --- a/docs/latest/api/login_api/index.html +++ b/docs/latest/api/login_api/index.html @@ -7,7 +7,7 @@ Login Api | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Login Document

    1. Docking With LDAP Service#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Add LDAP related configuration:

        wds.linkis.ldap.proxy.url=ldap://127.0.0.1:389/    # LDAP service URL
        wds.linkis.ldap.proxy.baseDN=dc=webank,dc=com      # Base DN configuration of the LDAP service

    2. How To Open The Test Mode To Achieve Login-Free#

    Enter the /conf/linkis-spring-cloud-services/linkis-mg-gateway directory and execute the command:

        vim linkis-server.properties

    Turn on the test mode and the parameters are as follows:

        wds.linkis.test.mode=true    # Open test mode
        wds.linkis.test.user=hadoop  # Specify which user to delegate all requests to in test mode

    3. Log In Interface Summary#

    We provide the following login-related interfaces:

    • Login In

    • Login Out

    • Heart Beat

    4. Interface details#

    • The return of the Linkis Restful interface follows the following standard return format:
    { "method": "", "status": 0, "message": "", "data": {}}

    Protocol

    • method: Returns the requested Restful API URI, which is mainly used in WebSocket mode.
    • status: returns status information, where: -1 means no login, 0 means success, 1 means error, 2 means verification failed, 3 means no access to the interface.
    • data: return specific data.
    • message: return the requested prompt message. If the status is not 0, the message returns an error message, and the data may have a stack field, which returns specific stack information.
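    A small helper that applies this status convention to any Linkis Restful response body can save repeated checks. This is a sketch only, not part of any Linkis SDK:

        STATUS_MESSAGES = {
            -1: "not logged in",
            0: "success",
            1: "error",
            2: "validation failed",
            3: "no access to the interface",
        }

        def unwrap(body: dict) -> dict:
            """Return body['data'] on success; raise with message (and stack, if any) otherwise."""
            if body.get("status") == 0:
                return body.get("data", {})
            detail = (body.get("data") or {}).get("stack") or body.get("message", "")
            raise RuntimeError(
                f"{STATUS_MESSAGES.get(body.get('status'), 'unknown status')}: {detail}"
            )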

    For more information about the Linkis Restful interface specification, please refer to: Linkis Restful Interface Specification

    1). Login In#

    • Interface /api/rest_j/v1/user/login

    • Submission method POST

          {        "userName": "",        "password": ""      }
    • Sample Response
        {        "method": null,        "status": 0,        "message": "login successful(登录成功)!",        "data": {            "isAdmin": false,            "userName": ""        }     }

    Among them:

    • isAdmin: Linkis only has admin users and non-admin users. The only privilege of admin users is to support viewing the historical tasks of all users in the Linkis management console.
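    A minimal Python login sketch; the requests.Session keeps the returned cookie, so the same session can be reused for the other interfaces in this document. The gateway address and credentials are assumptions:

        import requests

        GATEWAY = "http://127.0.0.1:9001"  # assumed gateway address

        session = requests.Session()       # keeps the login cookie for subsequent calls
        body = session.post(
            f"{GATEWAY}/api/rest_j/v1/user/login",
            json={"userName": "hadoop", "password": "<password>"},
        ).json()
        assert body["status"] == 0, body["message"]
        print("isAdmin:", body["data"]["isAdmin"])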

    2). Login Out#

    • Interface /api/rest_j/v1/user/logout

    • Submission method POST

      No parameters

    • Sample Response

        {        "method": "/api/rest_j/v1/user/logout",        "status": 0,        "message": "Logout successful(退出登录成功)!"    }

    3). Heart Beat#

    • Interface /api/rest_j/v1/user/heartbeat

    • Submission method POST

      No parameters

    • Sample Response

        {         "method": "/api/rest_j/v1/user/heartbeat",         "status": 0,         "message": "Maintain heartbeat success(维系心跳成功)!"    }
    - + \ No newline at end of file diff --git a/docs/latest/api/overview/index.html b/docs/latest/api/overview/index.html index bf47701196f..ff9837e8e66 100644 --- a/docs/latest/api/overview/index.html +++ b/docs/latest/api/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Overview

    1. Document description#

    Linkis 1.0 has been refactored and optimized on the basis of Linkis 0.x, and it is also compatible with the 0.x interface. However, in order to prevent compatibility problems when using version 1.0, you need to read the following documents carefully:

    1. When using Linkis 1.0 for customized development, you need to use Linkis's authorization and authentication interface. Please read the Login API Document carefully.

    2. Linkis 1.0 provides a JDBC interface. If you need to use JDBC to access Linkis, please read the Task Submit and Execute JDBC API Document.

    3. Linkis 1.0 provides the Rest interface. If you need to develop upper-level applications on the basis of Linkis, please read the Task Submit and Execute Rest API Document.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/commons/message_scheduler/index.html b/docs/latest/architecture/commons/message_scheduler/index.html index 7db3a8afed6..910c5838bcc 100644 --- a/docs/latest/architecture/commons/message_scheduler/index.html +++ b/docs/latest/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler Module | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Message Scheduler Module

    1 Overview#

            Linkis-RPC can realize the communication between microservices. In order to simplify the use of RPC, Linkis provides the Message-Scheduler module, which parses, identifies, and invokes methods annotated with @Receiver. At the same time, it unifies the use of the RPC and Restful interfaces, which gives better scalability.

    2. Architecture description#

    2.1. Architecture design diagram#

    Module Design Drawing

    2.2. Module description#

    • ServiceParser: Parses the objects of the Service module and encapsulates methods annotated with @Receiver into ServiceMethod objects.
    • ServiceRegistry: Registers the corresponding Service module and stores the ServiceMethods parsed from the Service in a Map container.
    • ImplicitParser: Parses the objects of the Implicit module; methods annotated with @Implicit are encapsulated into ImplicitMethod objects.
    • ImplicitRegistry: Registers the corresponding Implicit module and stores the resolved ImplicitMethods in a Map container.
    • Converter: Scans the non-interface, non-abstract subclasses of RequestMethod and stores them in a Map; parses Restful requests and matches them to the related RequestProtocol.
    • Publisher: Implements the publishing and scheduling function: finds the ServiceMethod matching the RequestProtocol in the Registry and encapsulates it as a Job for submission and scheduling.
    • Scheduler: The scheduling implementation; uses Linkis-Scheduler to execute the job and returns a MessageJob object.
    • TxManager: Completes transaction management: performs transaction management on job execution and decides whether to commit or roll back after the job execution ends.
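    To make the ServiceParser/ServiceRegistry/Publisher roles above concrete, here is an illustrative Python sketch of a registry that maps request protocols to @Receiver-style annotated methods. Every name in it is hypothetical; the actual Linkis implementation is in Scala/Java and differs in detail:

        from typing import Callable, Dict, Type

        class RequestProtocol: ...

        _registry: Dict[Type[RequestProtocol], Callable] = {}

        def receiver(protocol: Type[RequestProtocol]):
            """Stand-in for the @Receiver annotation: register a service method."""
            def decorate(fn: Callable) -> Callable:
                _registry[protocol] = fn   # ServiceRegistry: protocol -> ServiceMethod
                return fn
            return decorate

        class EngineAskRequest(RequestProtocol): ...

        @receiver(EngineAskRequest)
        def handle_engine_ask(req: EngineAskRequest):
            return "engine allocated"

        def publish(req: RequestProtocol):
            """Publisher: find the matching method and invoke it (as a scheduled job)."""
            return _registry[type(req)](req)

        print(publish(EngineAskRequest()))  # -> "engine allocated"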
    - + \ No newline at end of file diff --git a/docs/latest/architecture/commons/rpc/index.html b/docs/latest/architecture/commons/rpc/index.html index 549d36d85cf..48f3b309897 100644 --- a/docs/latest/architecture/commons/rpc/index.html +++ b/docs/latest/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC Module | Apache Linkis - + @@ -16,7 +16,7 @@ At the same time, because Feign only supports simple service selection rules, it cannot forward the request to the specified microservice instance, and cannot broadcast a request to all instances of the recipient microservice.

    2. Architecture description#

    2.1. Architecture design diagram#

    Linkis RPC architecture diagram

    2.2. Module description#

    The functions of the main modules are introduced as follows:

    • Eureka: Service registry, used for service registration, management and discovery.
    • Sender: The service request interface; the sender uses a Sender to request services from the receiver.
    • Receiver: The interface for receiving service requests; the receiver responds to services through this interface.
    • Interceptor: The Sender passes the user's request to the interceptor, which intercepts the request and performs additional processing on it: the broadcast interceptor broadcasts operations on the request, the retry interceptor retries failed requests, the cache interceptor reads and caches simple, unchanging requests, and the default interceptor provides the default implementation.
    • Decoder, Encoder: Used for request encoding and decoding.
    • Feign: A lightweight framework for HTTP request calls; a declarative WebService client used for Linkis-RPC's underlying communication.
    • Listener: The monitoring module, mainly used to monitor broadcast requests.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index 32c458dc6e6..8f99f0fe251 100644 --- a/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ Start an EngineConn | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    How to add an EngineConn

    Adding an EngineConn is one of the core processes of the computing-task preparation phase of Linkis computing governance. It mainly includes the following steps: first, the client side (Entrance or a user client) initiates a request for a new EngineConn to LinkisManager. Then LinkisManager initiates a request to EngineConnManager to start the EngineConn based on demands and label rules. Finally, LinkisManager returns the usable EngineConn to the client side.

    Based on the figure below, let's explain the whole process in detail:

    Process of adding a EngineConn

    1. LinkisManager receives the requests from client side#

    Glossary:

    • LinkisManager: The management center of Linkis computing governance capabilities. Its main responsibilities are:

      1. Based on multi-level combined tags, provide users with available EngineConn after complex routing, resource management and load balancing.

      2. Provide EC and ECM full life cycle management capabilities.

      3. Provide users with multi-Yarn cluster resource management functions based on multi-level combined tags. It is mainly divided into three modules: AppManager, ResourceManager and LabelManager, which can support multi-active deployment and have the characteristics of high availability and easy expansion.

    After the AM module receives the Client's request for a new EngineConn, it first checks the parameters of the request to determine their validity. Second, it selects the most suitable EngineConnManager (ECM) through complex rules for the subsequent EngineConn startup. Next, it applies to RM for the resources needed to start the EngineConn. Finally, it requests the ECM to create the EngineConn.

    The four steps will be described in detail below.

    1. Request parameter verification#

    After the AM module receives the engine creation request, it checks the parameters. First, it checks the permissions of the requesting user and the creating user, and then checks the Labels attached to the request. Since Labels will be used in AM's subsequent creation process to find the ECM and record resource information, you must ensure the necessary Labels are present. At this stage, the request must carry a UserCreatorLabel (for example: hadoop-IDE) and an EngineTypeLabel (for example: spark-2.4.3).
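    For reference, the two mandatory labels described above would look like this in a request body, using the example values from this paragraph:

        labels = {
            "userCreator": "hadoop-IDE",   # UserCreatorLabel: user + creator system
            "engineType": "spark-2.4.3",   # EngineTypeLabel: engine type + version
        }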

    2. Select an EngineConnManager (ECM)#

    ECM selection mainly uses the Labels passed by the client to choose a suitable ECM service to start the EngineConn. In this step, the LabelManager first searches the registered ECMs using the Labels passed by the client and returns them ordered by label matching degree. After the registered ECM list is obtained, these ECMs are further filtered by selection rules; at this stage, rules such as availability check, resource surplus, and machine load have been implemented. After the rules are applied, the ECM with the best label match, the most idle resources, and the lowest load is returned (a schematic sketch follows).
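    An illustrative sketch of these selection rules; the field names are hypothetical and only mirror the criteria named above (availability, label match, resource surplus, machine load):

        from dataclasses import dataclass

        @dataclass
        class EcmCandidate:
            label_match: int   # how many request labels this ECM matched
            free_memory: int   # resource surplus, larger is better
            load: float        # machine load, smaller is better
            healthy: bool

        def select_ecm(candidates: list[EcmCandidate]) -> EcmCandidate:
            available = [c for c in candidates if c.healthy]   # availability check
            if not available:
                raise RuntimeError("no usable ECM")
            # best label match first, then most idle resources, then lowest load
            return max(available, key=lambda c: (c.label_match, c.free_memory, -c.load))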

    3. Apply resources required for EngineConn#

    1. After obtaining the assigned ECM, AM will call the EngineConnPluginServer service to determine how many resources the client's engine-creation request will use. Here, the resource request will be encapsulated, mainly including the Labels, the EngineConn startup parameters passed by the Client, and the user configuration parameters obtained from the Configuration module. The resource information is obtained by calling the ECP service through RPC.

    2. After the EngineConnPluginServer service receives the resource request, it will first find the corresponding engine tag through the passed tag, and select the EngineConnPlugin of the corresponding engine through the engine tag. Then use EngineConnPlugin's resource generator to calculate the engine startup parameters passed in by the client, calculate the resources required to apply for a new EngineConn this time, and then return it to LinkisManager.

      Glossary:

    • EngineConnPlugin: The interface that Linkis must implement when connecting a new computing storage engine. This interface mainly includes several capabilities that the EngineConn must provide during the startup process, including the EngineConn resource generator, the EngineConn startup command generator, and the EngineConn connector. Please refer to the Spark engine implementation class for the specific implementation: SparkEngineConnPlugin.
    • EngineConnPluginServer: A microservice that loads all the EngineConnPlugins and externally provides the EngineConn resource generation and startup command generation capabilities.
    • EngineConnResourceFactory: Calculate the total resources needed when EngineConn starts this time through the parameters passed in.
    • EngineConnLaunchBuilder: Through the incoming parameters, a startup command of the EngineConn is generated to provide the ECM to start the engine.
    3. After AM obtains the engine resources, it will call the RM service to apply for resources. The RM service will use the incoming Labels, the ECM, and the resources applied for this time to make a resource judgment. First, it judges whether the resources of the client corresponding to the Labels are sufficient, and then whether the resources of the ECM service are sufficient. If the resources are sufficient, the resource application is approved, and the resources of the corresponding Labels are added or subtracted.

    4. Request ECM for engine creation#

    1. After completing the resource application for the engine, AM will encapsulate the engine startup request, send it to the corresponding ECM via RPC for service startup, and obtain the instance object of EngineConn.
    2. AM will then determine whether EngineConn is successfully started and become available through the reported information of EngineConn. If it is, the result will be returned, and the process of adding an engine this time will end.

    2. ECM initiates EngineConn#

    Glossary:

    • EngineConnManager: EngineConn's manager. Provides engine life-cycle management, and at the same time reports load information and its own health status to RM.
    • EngineConnBuildRequest: The start engine command passed by LinkisManager to ECM, which encapsulates all tag information, required resources and some parameter configuration information of the engine.
    • EngineConnLaunchRequest: Contains the BML materials, environment variables, ECM required local environment variables, startup commands and other information required to start an EngineConn, so that ECM can build a complete EngineConn startup script based on this.

    After ECM receives the EngineConnBuildRequest command passed by LinkisManager, it is mainly divided into three steps to start EngineConn:

    1. Request EngineConnPluginServer to obtain EngineConnLaunchRequest encapsulated by EngineConnPluginServer.
    2. Parse EngineConnLaunchRequest and encapsulate it into EngineConn startup script.
    3. Execute startup script to start EngineConn.

    2.1 EngineConnPluginServer encapsulates EngineConnLaunchRequest#

    From the label information in the EngineConnBuildRequest, obtain the EngineConn type and version that actually need to be started; get the EngineConnPlugin for that EngineConn type from the memory of EngineConnPluginServer; and convert the EngineConnBuildRequest into an EngineConnLaunchRequest through that EngineConnPlugin's EngineConnLaunchBuilder.

    2.2 Encapsulate EngineConn startup script#

    After the ECM obtains the EngineConnLaunchRequest, it downloads the BML materials in the EngineConnLaunchRequest to the local, and checks whether the local necessary environment variables required by the EngineConnLaunchRequest exist. After the verification is passed, the EngineConnLaunchRequest is encapsulated into an EngineConn startup script.

    2.3 Execute startup script#

    Currently, ECM only supports Bash commands for Unix systems, that is, only supports Linux systems to execute the startup script.

    Before startup, the sudo command is used to switch to the corresponding requesting user to execute the script, ensuring that the startup user (i.e., the JVM user) is the requesting user on the Client side.

    After the startup script is executed, ECM will monitor the execution status and execution log of the script in real time. Once the exit status returns non-zero, it will immediately report EngineConn startup failure to LinkisManager and the entire process is complete; otherwise, it will keep monitoring the log and status of the startup script until the script execution is complete.

    3. EngineConn initialization#

    After ECM executes EngineConn's startup script, the EngineConn microservice is officially launched.

    Glossary:

    • EngineConn microservice: Refers to the actual microservices that include an EngineConn and one or more Executors to provide computing power for computing tasks. When we talk about adding an EngineConn, we actually mean adding an EngineConn microservice.
    • EngineConn: The engine connector is the actual connection unit with the underlying computing storage engine, and contains the session information with the actual engine. The difference between it and Executor is that EngineConn only acts as a connection and a client, and does not actually perform calculations. For example, SparkEngineConn, its session information is SparkSession.
    • Executor: As a real computing storage scenario executor, it is the actual computing storage logic execution unit. It abstracts the various capabilities of EngineConn and provides multiple different architectural capabilities such as interactive execution, subscription execution, and responsive execution.

    The initialization of EngineConn microservices is generally divided into three stages:

    1. Initialize the EngineConn of the specific engine. First, the command line parameters of the Java main method are used to encapsulate an EngineCreationContext containing the relevant label, startup and parameter information, and EngineConn is initialized through the EngineCreationContext to establish the connection between EngineConn and the underlying engine. For example, SparkEngineConn initializes a SparkSession at this stage to establish a connection with a Spark application.
    2. Initialize the Executor. After the EngineConn is initialized, the corresponding Executor will be initialized according to the actual usage scenario to provide service capabilities to subsequent users. For example, the SparkEngineConn in the interactive computing scenario initializes a series of Executors that can be used to submit and execute SQL, PySpark, and Scala code, supporting the Client in submitting and executing SQL, PySpark, Scala and other code to the SparkEngineConn.
    3. Report the heartbeat to LinkisManager regularly, and wait for EngineConn to exit. When the underlying engine corresponding to EngineConn is abnormal, or the maximum idle time is exceeded, or the Executor finishes execution, or the user manually kills it, the EngineConn automatically ends and exits.

    At this point, the process of how to add a new EngineConn is basically over. Finally, let's make a summary:

    • The client initiates a request for adding EngineConn to LinkisManager.
    • LinkisManager checks the legitimacy of the parameters, first selects the appropriate ECM according to the label, then confirms the resources required for this new EngineConn according to the user's request, applies for resources from the RM module of LinkisManager, and requires ECM to start a new EngineConn as required after the application is passed.
    • ECM first requests EngineConnPluginServer to obtain an EngineConnLaunchRequest containing BML materials, environment variables, ECM required local environment variables, startup commands and other information needed to start an EngineConn, and then encapsulates the startup script of EngineConn, and finally executes the startup script to start the EngineConn.
    • EngineConn initializes the EngineConn of a specific engine, and then initializes the corresponding Executor according to the actual usage scenario, and provides service capabilities for subsequent users. Finally, report the heartbeat to LinkisManager regularly, and wait for the normal end or termination by the user.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html b/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html index 2383235bddd..cc0dc96a646 100644 --- a/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    EngineConn architecture design

    EngineConn: Engine connector, the actual connection unit with the underlying computing storage engines. It creates and holds the connection session between Linkis and a specific engine, acting as the client that communicates with that engine.

    EngineConn architecture diagram

    EngineConn

    Introduction to the second-level module:

    linkis-computation-engineconn interactive engine connector#

    Provides the ability to execute interactive computing tasks.

    Core class           | Core function
    EngineConnTask       | Defines the interactive computing tasks submitted to EngineConn
    ComputationExecutor  | Defines the interactive Executor, with interactive capabilities such as status query and task kill
    TaskExecutionService | Provides management functions for interactive computing tasks

    linkis-engineconn-common engine connector common module#

    Define the most basic entity classes and interfaces in the engine connector. EngineConn is used to create a connection session for the underlying computing storage engine, which contains the session information between the engine and the specific cluster, and is the client that communicates with the specific engine.

    Core Service          | Core function
    EngineCreationContext | Contains the context information of EngineConn during startup
    EngineConn            | Contains the specific information of EngineConn, such as type and connection information with the underlying computing storage engine
    EngineExecution       | Provides the Executor creation logic
    EngineConnHook        | Defines the operations before and after each phase of engine startup

    The core logic of linkis-engineconn-core engine connector#

    Defines the interfaces involved in the core logic of EngineConn.

    Core class        | Core function
    EngineConnManager | Provides related interfaces for creating and obtaining EngineConn
    ExecutorManager   | Provides related interfaces for creating and obtaining Executor
    ShutdownHook      | Defines the operations of the engine shutdown phase

    linkis-engineconn-launch engine connector startup module#

    Defines the logic of how to start EngineConn.

    Core class       | Core function
    EngineConnServer | EngineConn microservice startup class

    The core logic of the linkis-executor-core executor#

    Defines the core classes related to the Executor. The Executor is the executor for real computing scenarios and is responsible for submitting user code to EngineConn.

    Core class                 | Core function
    Executor                   | The actual computational logic execution unit; provides a top-level abstraction of the various capabilities of the engine
    EngineConnAsyncEvent       | Defines EngineConn-related asynchronous events
    EngineConnSyncEvent        | Defines EngineConn-related synchronization events
    EngineConnAsyncListener    | Defines the EngineConn-related asynchronous event listener
    EngineConnSyncListener     | Defines the EngineConn-related synchronization event listener
    EngineConnAsyncListenerBus | Defines the listener bus for EngineConn asynchronous events
    EngineConnSyncListenerBus  | Defines the listener bus for EngineConn synchronization events
    ExecutorListenerBusContext | Defines the context of the EngineConn event listener
    LabelService               | Provides the label reporting function
    ManagerService             | Provides information transfer with LinkisManager

    linkis-callback-service callback logic#

    Core Class         | Core Function
    EngineConnCallback | Defines EngineConn's callback logic

    linkis-accessible-executor accessible executor#

    An Executor that can be accessed: you can interact with it through RPC requests to obtain its basic metrics data such as status, load, and concurrency.

    Core Class              | Core Function
    LogCache                | Provides the log cache function
    AccessibleExecutor      | An Executor that can be accessed and interacted with through RPC requests
    NodeHealthyInfoManager  | Manages Executor health information
    NodeHeartbeatMsgManager | Manages Executor heartbeat information
    NodeOverLoadInfoManager | Manages Executor load information
    Listener                | Provides Executor-related events and the corresponding listener definitions
    EngineConnTimedLock     | Defines the Executor-level lock
    AccessibleService       | Provides start/stop and status acquisition functions for Executor
    ExecutorHeartbeatService| Provides Executor heartbeat-related functions
    LockService             | Provides the lock management function
    LogService              | Provides log management functions
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 4ae4fc20ef2..e2ef1ef41b4 100644 --- a/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager Design | Apache Linkis - + @@ -16,7 +16,7 @@ Core Service and Features module are as follows:

    Core service                   | Core function
    EngineConnLaunchService        | Contains the core methods for generating EngineConn and starting the process
    BmlResourceLocallizationService| Used to download BML engine-related resources and generate the localized file directory
    ECMHealthService               | Reports its own healthy heartbeat to AM regularly
    ECMMetricsService              | Reports its own metric status to AM regularly
    EngineConnKillSerivce          | Provides related functions to stop the engine
    EngineConnListService          | Provides engine caching and management functions
    EngineConnCallBackService      | Provides the engine callback function
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 4fcdd680690..b4f815c76aa 100644 --- a/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin (ECP) Design | Apache Linkis - + @@ -17,7 +17,7 @@ Other services such as Manager call the logic of the corresponding plug-in in Plugin Server through RPC requests.

    Core Class                      | Core Function
    EngineConnLaunchService         | Responsible for building the engine connector launch request
    EngineConnResourceFactoryService| Responsible for generating engine resources
    EngineConnResourceService       | Responsible for downloading the resource files used by the engine connector from BML

    EngineConn-Plugin-Loader Engine Connector Plugin Loader#

    The engine connector plug-in loader is used to dynamically load engine connector plug-ins according to request parameters, and it caches what it loads. The loading process mainly consists of two parts: 1) plug-in resources, such as the main program package and its dependency packages, are loaded locally (remote loading is not yet available); 2) the plug-in resources are dynamically loaded from the local environment into the service process, for example loaded into the JVM via a class loader.

    Core Class                     | Core Function
    EngineConnPluginsResourceLoader| Loads engine connector plug-in resources
    EngineConnPluginsLoader        | Loads the engine connector plug-in instance, or loads an existing one from the cache
    EngineConnPluginClassLoader    | Dynamically instantiates an engine connector instance from a jar

    EngineConn-Plugin-Cache engine plug-in cache module#

    The engine connector plug-in cache is a cache service dedicated to caching loaded engine connectors, supporting read, update, and removal. A plug-in that has been loaded into the service process is cached together with its class loader to prevent repeated loading from affecting efficiency; at the same time, the cache module periodically notifies the loader to update the plug-in resources, and if changes are found, the plug-in is reloaded and the cache refreshed automatically.

    Core Class                 | Core Function
    EngineConnPluginCache      | Caches loaded engine connector instances
    RefreshPluginCacheContainer| Refreshes cached engine connectors regularly

    EngineConn-Plugin-Core: Engine connector plug-in core module#

    The engine connector plug-in core module is the core module of the engine connector plug-in. Contains the implementation of the basic functions of the engine plug-in, such as the construction of the engine connector start command, the construction of the engine resource factory and the implementation of the core interface of the engine connector plug-in.

    Core Class             | Core Function
    EngineConnLaunchBuilder| Builds the engine connector launch request
    EngineConnFactory      | Creates the engine connector
    EngineConnPlugin       | The engine connector plug-in interface, covering resource, command, and instance construction methods
    EngineResourceFactory  | Engine resource creation factory

    EngineConn-Plugins: Engine connection plugin collection#

    The engine connection plug-in collection holds the default engine connector plug-in libraries implemented against the plug-in interface defined by Linkis. It provides default engine connector implementations such as jdbc, spark, python, and shell. Users can refer to these implemented cases to implement more engine connectors based on their own needs.

    Core Class        | Core Function
    engineplugin-jdbc | jdbc engine connector
    engineplugin-shell| Shell engine connector
    engineplugin-spark| spark engine connector
    engineplugin-python| python engine connector
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/entrance/index.html b/docs/latest/architecture/computation_governance_services/entrance/index.html index b2c9b070f19..691c3cd12d5 100644 --- a/docs/latest/architecture/computation_governance_services/entrance/index.html +++ b/docs/latest/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Entrance Architecture Design

    The Linkis task submission portal is used to receive, schedule, and forward execution requests and to manage the life cycle of computing tasks; it can return calculation results, logs, and progress to the caller. It is split out from the native capabilities of the Linkis 0.x Entrance.

    1. Entrance architecture diagram

    Introduction to the second-level module:

    EntranceServer#

    EntranceServer computing task submission portal service is the core service of Entrance, responsible for the reception, scheduling, execution status tracking, and job life cycle management of Linkis execution tasks. It mainly realizes the conversion of task execution requests into schedulable Jobs, scheduling, applying for Executor execution, job status management, result set management, log management, etc.

    Core Class             | Core Function
    EntranceInterceptor    | The Entrance interceptor supplements the information of the incoming task, making the content of the task more complete. The supplementary information includes: database information supplement, custom variable replacement, code inspection, limit restrictions, etc.
    EntranceParser         | The Entrance parser parses the request parameter Map into a Task; it can also convert a Task into a schedulable Job, or a Job into a storable Task.
    EntranceExecutorManager| Entrance executor management creates an Executor for the execution of EntranceJob, maintains the relationship between Job and Executor, and supports the labeling capabilities requested by the Job
    PersistenceManager     | Persistence management is responsible for job-related persistence operations, such as storing the result set path, job status changes, and progress in the database.
    ResultSetEngine        | The result set engine is responsible for storing the result set after the job is run, saved in the form of a file to HDFS or a local storage directory.
    LogManager             | Log management is responsible for the storage of job logs and the management of log error codes.
    Scheduler              | The job scheduler is responsible for the scheduling and execution of all jobs, mainly through scheduling job queues.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index 653caf14f78..64198602ada 100644 --- a/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job Submission | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Job submission, preparation and execution process

    The submission and execution of computing tasks (Jobs) is the core capability provided by Linkis. It interacts with almost all modules in the Linkis computing governance architecture and occupies a core position in Linkis.

    The whole process, starting with the submission of the user's computing task from the client and ending with the return of the final result, is divided into three stages: submission -> preparation -> execution. The details are shown in the following figure.

    The overall flow chart of computing tasks

    Among them:

    • Entrance, as the entrance to the submission stage, provides task reception, scheduling and job information forwarding capabilities. It is the unified entrance for all computing tasks. It will forward computing tasks to Orchestrator for scheduling and execution.

    • Orchestrator, as the entrance to the preparation phase, mainly provides job analysis, orchestration and execution capabilities.

    • Linkis Manager: The management center of computing governance capabilities. Its main responsibilities are as follows:

      1. ResourceManager: Not only has the resource management capabilities of Yarn and Linkis EngineConnManager, but also provides tag-based multi-level resource allocation and recovery capabilities, allowing ResourceManager to have full resource management capabilities across clusters and across computing resource types;
      2. AppManager: Coordinates and manages all EngineConnManagers and EngineConns; the entire life cycle of an EngineConn (application, reuse, creation, switching, destruction) is handed over to AppManager for management;
      3. LabelManager: Based on multi-level combined labels, provides label support for the routing and management capabilities of EngineConn and EngineConnManager across IDCs and clusters;
      4. EngineConnPluginServer: Externally provides the resource generation capabilities required to start an EngineConn and the EngineConn startup command generation capabilities.
    • EngineConnManager: It is the manager of EngineConn, which provides engine life-cycle management, and at the same time reports load information and its own health status to RM.

    • EngineConn: It is the actual connector between Linkis and the underlying computing storage engines. All user computing and storage tasks will eventually be submitted to the underlying computing storage engine by EngineConn. According to different user scenarios, EngineConn provides full-stack computing capability framework support for interactive computing, streaming computing, off-line computing, and data storage tasks.

    1. Submission Stage#

    The submission phase is mainly the interaction of Client -> Linkis Gateway -> Entrance, and the process is as follows:

    Flow chart of submission phase

    1. First, the Client (such as the front end or a client) initiates a Job request; the job request information is simplified as follows (a Python sketch of this call appears after this list; for the specific usage of Linkis, please refer to How to use Linkis):
    POST /api/rest_j/v1/entrance/submit
    {     "executionContent": {"code": "show tables", "runType": "sql"},     "params": {"variable": {}, "configuration": {}}, //not required     "source": {"scriptPath": "file:///1.hql"}, //not required, only used to record code source     "labels": {         "engineType": "spark-2.4.3", //Specify engine         "userCreator": "username-IDE" // Specify the submission user and submission system     }}
    2. After Linkis-Gateway receives the request, according to the serviceName in the URI /api/rest_j/v1/${serviceName}/.+, it confirms the microservice name for routing and forwarding. Here Linkis-Gateway parses out the name entrance, and the Job is forwarded to the Entrance microservice. Note that if the user specifies a routing label, the Entrance microservice instance with the corresponding label is selected for forwarding according to the routing label instead of random forwarding.
    3. After Entrance receives the Job request, it first simply verifies the legitimacy of the request, then uses RPC to call JobHistory to persist the job information, encapsulates the Job request as a computing task, puts it in the scheduling queue, and waits for it to be consumed by a consumer thread.
    4. The scheduling queue opens up a consumption queue and a consumer thread for each group. The consumption queue stores the preliminarily encapsulated user computing tasks, and the consumer thread continuously takes computing tasks from the consumption queue in a FIFO manner. The current default grouping method is Creator + User (that is, submission system + user). Therefore, even for the same user, computing tasks submitted by different systems use completely different consumption queues and consumer threads, fully isolated from each other. (Reminder: users can modify the grouping algorithm as needed.)
    5. After the consumer thread takes out the computing task, it submits the task to Orchestrator, which officially enters the preparation phase.
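    A Python sketch of the submission in step 1, assuming the gateway address and authenticated session from the login example elsewhere in this document, and assuming the submit interface returns execID/taskID in the same shape as the execute interface shown earlier:

        job = {
            "executionContent": {"code": "show tables", "runType": "sql"},
            "labels": {"engineType": "spark-2.4.3", "userCreator": "username-IDE"},
        }
        body = session.post(f"{GATEWAY}/api/rest_j/v1/entrance/submit", json=job).json()
        exec_id, task_id = body["data"]["execID"], body["data"]["taskID"]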

    2. Preparation Stage#

    There are two main processes in the preparation phase. One is to apply to LinkisManager for an available EngineConn for submitting and executing the subsequent computing task. The other is that Orchestrator orchestrates the computing task submitted by Entrance, converting a user's computing request into a physical execution tree, which is then handed over to the execution phase, where the computing task is actually executed.

    2.1 Apply to LinkisManager for available EngineConn#

    If the user has a reusable EngineConn in LinkisManager, the EngineConn is directly locked and returned to Orchestrator, and the entire application process ends.

    What counts as a reusable EngineConn? One that can match all the label requirements of the computing task and whose health status is Healthy (the load is low and the actual status is Idle). All EngineConns that meet the conditions are then sorted and selected according to the rules, and finally the best one is locked.

    If the user does not have a reusable EngineConn, a process to request a new EngineConn will be triggered at this time. Regarding the process, please refer to: How to add an EngineConn.

    2.2 Orchestrate a computing task#

    Orchestrator is mainly responsible for arranging a computing task (JobReq) into a physical execution tree (PhysicalTree) that can be actually executed, and providing the execution capabilities of the Physical tree.

    Here we first focus on Orchestrator's computing task scheduling capabilities. A flow chart is shown below:

    Orchestration flow chart

    The main process is as follows:

    • Converter: Complete the conversion of the JobReq (task request) submitted by the user to Orchestrator's ASTJob. This step will perform parameter check and information supplementation on the calculation task submitted by the user, such as variable replacement, etc.
    • Parser: Complete the analysis of ASTJob. Split ASTJob into an AST tree composed of ASTJob and ASTStage.
    • Validator: Complete the inspection and information supplement of ASTJob and ASTStage, such as code inspection, necessary Label information supplement, etc.
    • Planner: Convert an AST tree into a Logical tree. The Logical tree at this time has been composed of LogicalTask, which contains all the execution logic of the entire computing task.
    • Optimizer: Convert a Logical tree to a Physical tree and optimize the Physical tree.
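    A schematic sketch of the shape of this five-step pipeline; the stage functions below are stubs, and none of these names are actual Linkis Orchestrator APIs:

        # Each stage is a stub that only illustrates the transformation chain.
        def converter(job_req): return {"ast": job_req}          # JobReq -> ASTJob (check + supplement)
        def parser(ast_job): return [ast_job]                    # ASTJob -> AST tree of ASTJob/ASTStage
        def validator(tree): return tree                         # inspection + label supplement
        def planner(tree): return {"logical": tree}              # AST tree -> Logical tree of LogicalTask
        def optimizer(logical): return {"physical": logical}     # Logical tree -> optimized Physical tree

        def orchestrate(job_req):
            return optimizer(planner(validator(parser(converter(job_req)))))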

    In a physical tree, the majority of nodes are computing strategy logic. Only the middle ExecTask truly encapsulates the execution logic which will be further submitted to and executed at EngineConn. As shown below:

    Physical Tree

    The execution logic encapsulated by JobExecTask and StageExecTask in the Physical tree depends on the specific computing strategy.

    For example, under the multi-active computing strategy, for a computing task submitted by a user, the execution logic submitted to EngineConn of different clusters for execution is encapsulated in two ExecTasks, and the related strategy logic is reflected in the parent node (StageExecTask(End)) of the two ExecTasks.

    Here, we take the multi-reading scenario under the multi-active computing strategy as an example.

    In the multi-reading scenario, only one ExecTask result needs to return. Once that result is returned, the Physical tree can be marked as successful. However, the Physical tree only has the ability to execute sequentially according to dependencies and cannot terminate the execution of each node: once a node is canceled or fails to execute, the entire Physical tree would be marked as failure. At this point, StageExecTask (End) is needed to ensure that the Physical tree can both cancel the ExecTasks that failed to execute and continue to upload the result set generated by the successful ExecTask, letting the Physical tree continue to execute. This is the execution logic of the computing strategy represented by StageExecTask.

    The orchestration process of Linkis Orchestrator is similar to many SQL parsing engines (such as Spark, Hive's SQL parser). But in fact, the orchestration capability of Linkis Orchestrator is realized based on the computing governance field for the different computing governance needs of users. The SQL parsing engine is a parsing orchestration oriented to the SQL language. Here is a simple distinction:

    1. What Linkis Orchestrator mainly solves is the orchestration requirement that different computing tasks impose on computing strategies. For example, to be multi-active, Orchestrator will, for a computing task submitted by the user, compile a Physical tree based on the "multi-active" computing strategy, so that the task is submitted to multiple clusters for execution. In constructing the entire Physical tree, various possible abnormal scenarios have been fully considered and are all reflected in the Physical tree.
    2. The orchestration ability of Linkis Orchestrator has nothing to do with the programming language. In theory, as long as an engine has been adapted to Linkis, all the programming languages it supports can be orchestrated, while a SQL parsing engine only cares about the analysis and execution of SQL, and is only responsible for parsing a piece of SQL into an executable Physical tree and finally computing the result.
    3. Linkis Orchestrator also has the ability to parse SQL, but SQL parsing is just one of Orchestrator Parser's analytic implementations for the SQL programming language. The Parser of Linkis Orchestrator also considers introducing Apache Calcite to parse SQL, supporting the splitting of a user SQL that spans multiple computing engines (which must be engines already integrated with Linkis) into multiple sub-SQLs, submitting them to each corresponding engine during the execution phase, and finally selecting a suitable computing engine for summary calculation.

    After the analysis and orchestration by Linkis Orchestrator, the computing task has been transformed into an executable Physical tree. Orchestrator will submit the Physical tree to its Execution module and enter the final execution stage.

    3. Execution Stage#

    The execution stage is mainly divided into the following two steps, these two steps are the last two phases of capabilities provided by Linkis Orchestrator:

    Flow chart of the execution stage

    The main process is as follows:

    • Execution: Analyzes the dependencies of the Physical tree and executes the nodes sequentially from the leaf nodes according to those dependencies.
    • Reheater: Once the execution of a node in the Physical tree is completed, a reheat is triggered. Reheating allows the Physical tree to be dynamically adjusted according to real-time execution. For example, if a leaf node is detected to have failed and it supports retry (i.e., it failed by throwing a ReTryExecption), the Physical tree is automatically adjusted and a retry parent node with exactly the same content is added above the leaf node.

    Let us go back to the Execution stage, where we focus on the execution logic of the ExecTask node that encapsulates the user computing task submitted to EngineConn.

    1. As mentioned earlier, the first step in the preparation phase is to obtain a usable EngineConn from LinkisManager. After ExecTask gets this EngineConn, it will submit the user's computing task to EngineConn through an RPC request.
    2. After EngineConn receives the computing task, it will asynchronously submit it to the underlying computing storage engine through the thread pool, and then immediately return an execution ID.
    3. After ExecTask gets this execution ID, it can then use the ID to asynchronously pull the execution status of the computing task (such as: status, progress, log, result set, etc.).
    4. At the same time, EngineConn will monitor the execution of the underlying computing storage engine in real time through multiple registered Listeners. If the computing storage engine does not support registering Listeners, EngineConn will start a daemon thread for the computing task and periodically pull the execution status from the computing storage engine.
    5. EngineConn will report the execution status back to the microservice where Orchestrator is located in real time through RPC requests.
    6. After the Receiver of the microservice receives the execution status, it will broadcast it through the ListenerBus, and the Orchestrator Execution will consume the event and dynamically update the execution status of the Physical tree.
    7. The result set generated by the computing task will be written to storage media such as HDFS on the EngineConn side. EngineConn returns only the result set path through RPC; Execution consumes the event and broadcasts the obtained result set path through ListenerBus, so that the Listener registered by Entrance with Orchestrator can consume the path and persist it to JobHistory.
    8. After the execution of the computing task on the EngineConn side is completed, through the same logic, the Execution will be triggered to update the state of the ExecTask node of the Physical tree, so that the Physical tree will continue to execute until the entire tree is completely executed. At this time, Execution will broadcast the completion status of the calculation task through ListenerBus.
    9. After the Entrance registered Listener with the Orchestrator consumes the state event, it updates the job state to JobHistory, and the entire task execution is completed.

    Finally, let's take a look at how the client side knows the state of the calculation task and obtains the calculation result in time, as shown in the following figure:

    Results acquisition process

    The specific process is as follows:

    1. The client periodically polls to request Entrance to obtain the status of the computing task.
    2. Once the status is flipped to success, it sends a request for job information to JobHistory, and gets all the result set paths.
    3. Initiate a query file content request to PublicService through the result set path, and obtain the content of the result set.

    At this point, the entire process of job submission -> preparation -> execution has been completed.
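    Putting the three client-side steps together, a hedged Python sketch; the status endpoint path and the terminal state names are assumptions based on the interface family shown earlier in this document, and GATEWAY/session come from the login example:

        import time

        def wait_and_fetch(session, exec_id: str, task_id: str):
            # 1) poll Entrance for the task status (endpoint path assumed)
            while True:
                status = session.get(
                    f"{GATEWAY}/api/rest_j/v1/entrance/{exec_id}/status"
                ).json()["data"]["status"]
                if status in ("Succeed", "Failed", "Cancelled"):  # assumed terminal states
                    break
                time.sleep(2)
            # 2) ask JobHistory for the job info, including the result location
            task = session.get(
                f"{GATEWAY}/api/rest_j/v1/jobhistory/{task_id}/get"
            ).json()["data"]["task"]
            # 3) read the result content through filesystem/openFile
            return session.get(
                f"{GATEWAY}/api/rest_j/v1/filesystem/openFile",
                params={"path": task["resultLocation"] + "/_0.dolphin"},
            ).json()["data"]["fileContent"]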

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis-cli/index.html b/docs/latest/architecture/computation_governance_services/linkis-cli/index.html index 31820c35353..801ebf186f8 100644 --- a/docs/latest/architecture/computation_governance_services/linkis-cli/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis-Client Architecture Design | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Linkis-Client Architecture Design

    Provide users with a lightweight client that submits tasks to Linkis for execution.

    Linkis-Client architecture diagram#

    Linkis-Client architecture diagram

    Second-level module introduction#

    Linkis-Computation-Client#

    Provides an interface for users to submit execution tasks to Linkis in the form of SDK.

    Core Class | Core Function
    Action     | Defines the attributes, methods and parameters of a request
    Result     | Defines the properties, methods and parameters of a returned result
    UJESClient | Responsible for request submission and execution, and for obtaining status, results and related parameters
    Linkis-Cli#

    Provides a way for users to submit tasks to Linkis in the form of a shell command terminal.

    Core Class | Core Function
    Common     | Defines the instruction template parent class, the instruction analysis entity class, and the parent classes and interfaces of the task submission and execution links
    Core       | Responsible for parsing input, executing tasks and defining output methods
    Application| Calls linkis-computation-client to perform tasks, and pulls logs and the final result in real time
    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 90c7d741182..62fcb79cc08 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ App Manager | Apache Linkis - + @@ -29,7 +29,7 @@ Engine manager: Engine manager is responsible for managing the basic information and metadata information of all engines.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 0f776a285e1..e7fa6acaaf4 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ Label Manager | Apache Linkis - + @@ -22,7 +22,7 @@ We set that the higher the proportion of candidate nodes associated with irrelevant labels in the total associated nodes, the more significant the impact on the score, which can further accumulate the initial score of the node obtained in the first step.
  • Normalize the standard deviation of the scores of the candidate nodes and sort them.
  • - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html index b78f918222e..826a39a2ce6 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ ResourceManager

    4. Monitoring module linkis-manager-monitor#

            Monitor provides the function of node status monitoring.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index d2792366439..984fd02280a 100644 --- a/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ Resource Manager | Apache Linkis - + @@ -25,7 +25,7 @@ url, Hadoop version and other information) are maintained in the linkis_external_resource_provider table.

  • For each resource type, there is an implementation of the ExternalResourceProviderParser interface, which parses the attributes of external resources, converts the information that can be matched to the Label into the corresponding Label, and converts the information that can be used as a parameter to request the resource interface into params . Finally, an ExternalResourceProvider instance that can be used as a basis for querying external resource information is constructed.

  • According to the resource type and label information in the parameters of the ExternalResourceService method, find the matching ExternalResourceProvider, generate an ExternalResourceRequest based on the information in it, and formally call the API provided by the external resource to initiate a resource information request.

  • - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/overview/index.html b/docs/latest/architecture/computation_governance_services/overview/index.html index 97cf44c01c4..79b178de46e 100644 --- a/docs/latest/architecture/computation_governance_services/overview/index.html +++ b/docs/latest/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -21,7 +21,7 @@ Enter EngineConn Architecture Design

    - + \ No newline at end of file diff --git a/docs/latest/architecture/computation_governance_services/proxy_user/index.html b/docs/latest/architecture/computation_governance_services/proxy_user/index.html index b34f532306d..5fe6bdbb5b2 100644 --- a/docs/latest/architecture/computation_governance_services/proxy_user/index.html +++ b/docs/latest/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -18,7 +18,7 @@
    • The relevant Linkis interfaces need to be able to identify the proxy user information based on the original UserName obtained, use the proxy user to perform the various operations, and record audit logs, including the user's task execution and download operations
    • When a task is submitted for execution, the entrance service needs to change the executing user to the proxy user

    5 Things to Consider & Note#

    • Users are divided into proxy users and non-proxy users; a user of proxy type cannot proxy to yet another user.
    • It is necessary to control the list of login users and system users who can be proxied, prohibiting arbitrary proxying and avoiding uncontrollable permissions. It is best to support configuration via database tables, so that changes take effect directly without restarting the service.
    • Log files containing proxy user operations, such as proxy execution and function updates, should be recorded separately. All proxy user operations in PublicService are recorded in the log, which is convenient for auditing.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/difference_between_1.0_and_0.x/index.html b/docs/latest/architecture/difference_between_1.0_and_0.x/index.html index d39543cd272..9efceed07e7 100644 --- a/docs/latest/architecture/difference_between_1.0_and_0.x/index.html +++ b/docs/latest/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Difference Between 1.0 And 0.x | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis EngineConn Architecture diagram

    - + \ No newline at end of file diff --git a/docs/latest/architecture/microservice_governance_services/gateway/index.html b/docs/latest/architecture/microservice_governance_services/gateway/index.html index dffd68bd5fe..b4c781345c7 100644 --- a/docs/latest/architecture/microservice_governance_services/gateway/index.html +++ b/docs/latest/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ Gateway Design | Apache Linkis - + @@ -26,7 +26,7 @@ Gateway WebSocket Forwarding

    - + \ No newline at end of file diff --git a/docs/latest/architecture/microservice_governance_services/overview/index.html b/docs/latest/architecture/microservice_governance_services/overview/index.html index b6678e0169b..8437cb85273 100644 --- a/docs/latest/architecture/microservice_governance_services/overview/index.html +++ b/docs/latest/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -31,7 +31,7 @@

    - + \ No newline at end of file diff --git a/docs/latest/architecture/overview/index.html b/docs/latest/architecture/overview/index.html index d4b05e77aad..9b8f66d8109 100644 --- a/docs/latest/architecture/overview/index.html +++ b/docs/latest/architecture/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Overview

    Linkis 1.0 divides all microservices into three categories: public enhancement services, computing governance services, and microservice governance services. The following figure shows the architecture of Linkis 1.0.

    Linkis1.0 Architecture Figure

    The specific responsibilities of each category are as follows:

    1. Public enhancement services are the material library service, context service, data source service, and public services that Linkis 0.X already provides.
    2. The microservice governance services are Spring Cloud Gateway, Eureka, and Open Feign, already provided by Linkis 0.X; Linkis 1.0 will also provide support for Nacos.
    3. Computing governance services are the core focus of Linkis 1.0, comprehensively upgrading Linkis's ability to govern user tasks across the three stages of submission, preparation, and execution.

    The following is a directory listing of Linkis1.0 architecture documents:

    1. For the characteristics of the Linkis1.0 architecture, please read The difference between Linkis1.0 and Linkis0.x.
    2. Linkis 1.0 public enhancement service related documents, please read Public Enhancement Service.
    3. Linkis 1.0 microservice governance related documents, please read Microservice Governance.
    4. Linkis 1.0 computing governance service related documents, please read Computation Governance Service.
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index c71df4e8652..1491fe6358f 100644 --- a/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ Analysis of engin BML | Apache Linkis - + @@ -17,7 +17,7 @@ taskDao.updateState(resourceTask.getId(), TaskState.RUNNING.getValue(), new Date());

    3) The actual writing of material files into the material library is completed by the upload method of the ResourceServiceImpl class. Inside the upload method, the byte streams corresponding to List<MultipartFile> files are persisted to the material library's file storage system; the properties of the material file are stored in the resource record table (linkis_ps_bml_resources) and the resource version record table (linkis_ps_bml_resources_version).

    MultipartFile p = files[0];
    String resourceId = (String) properties.get("resourceId");
    String fileName = new String(p.getOriginalFilename().getBytes(Constant.ISO_ENCODE), Constant.UTF8_ENCODE);
    fileName = resourceId;
    String path = resourceHelper.generatePath(user, fileName, properties);
    // generatePath currently supports Local and HDFS paths; the composition rules of the path are determined by LocalResourceHelper or HdfsResourceHelper
    StringBuilder sb = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, sb, true);
    // The file size calculation and the writing of the file byte stream are implemented by the upload method in LocalResourceHelper or HdfsResourceHelper
    Resource resource = Resource.createNewResource(resourceId, user, fileName, properties);
    // Insert a record into the resource table linkis_ps_bml_resources
    long id = resourceDao.uploadResource(resource);
    // Add a new record to the resource version table linkis_ps_bml_resources_version; the version number at this point is Constant.FIRST_VERSION
    // In addition to the metadata of this version, the most important thing recorded is the storage location of this version's file: the file path, start offset, and end offset
    String clientIp = (String) properties.get("clientIp");
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(
            resourceId, path, md5String, clientIp, size, Constant.FIRST_VERSION, 1);
    versionDao.insertNewVersion(resourceVersion);

    After the above process executes successfully, the material upload is truly complete; the UploadResult is then returned to the client and the status of this ResourceTask is marked as completed. If an exception occurs along the way, the status of this ResourceTask is marked as failed and the exception information is recorded.

    resource-task

    4.2.2 Engine material update process#

    Engine material update process sequence diagram

    Engine material update process sequence diagram

    If the table linkis_cg_engine_conn_plugin_bml_resources matches the local material data, you need to use the data in EngineConnLocalizeResource to construct an EngineConnBmlResource object, and update the metadata information such as the version number, file size, modification time, etc. of the original material file in the linkis_cg_engine_conn_plugin_bml_resources table. Before updating, you need to complete the update and upload operation of the material file, that is, execute the uploadToBml(localizeResource, engineConnBmlResource.getBmlResourceId) method.

    Inside the uploadToBml(localizeResource, resourceId) method, a bmlClient is constructed to request the material resource update interface, namely:

    private val bmlClient = BmlClientFactory.createBmlClient()
    bmlClient.updateResource(Utils.getJvmUser, resourceId, localizeResource.fileName, localizeResource.getFileInputStream)

    In BML Server, the interface for material update is located in the updateVersion interface method in the BmlRestfulApi class. The main process is as follows:

    Complete the validity detection of resourceId, that is, check whether the incoming resourceId exists in the linkis_ps_bml_resources table. If the resourceId does not exist, an exception will be thrown to the client, and the material update operation at the interface level will fail.

    Therefore, the correspondence of the resource data between the tables linkis_cg_engine_conn_plugin_bml_resources and linkis_ps_bml_resources needs to be complete; otherwise an error occurs and the material file cannot be updated.

    resourceService.checkResourceId(resourceId)

    If resourceId exists in the linkis_ps_bml_resources table, it will continue to execute:

    StringUtils.isEmpty(versionService.getNewestVersion(resourceId))

    The getNewestVersion method obtains the maximum version number of the resourceId from the table linkis_ps_bml_resources_version. If the maximum version corresponding to the resourceId is empty, the material update will also fail, so the integrity of this data relationship likewise needs to be strictly guaranteed.
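
    Presumably this corresponds to a query along the following lines (the exact SQL is an assumption for illustration; only the table and the intent are stated in the text):

    -- illustrative sketch of what getNewestVersion looks up
    SELECT MAX(version) FROM linkis_ps_bml_resources_version WHERE resource_id = ?;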

    After the above two checks are passed, a ResourceUpdateTask will be created to complete the final file writing and record update saving.

    ResourceTask resourceTask = null;
    synchronized (resourceId.intern()) {
        resourceTask = taskService.createUpdateTask(resourceId, user, file, properties);
    }

    Inside the createUpdateTask method, the main functions implemented are:

    // Generate a new version for the material resource
    String lastVersion = getResourceLastVersion(resourceId);
    String newVersion = generateNewVersion(lastVersion);
    // Then construct the ResourceTask and maintain its state
    ResourceTask resourceTask = ResourceTask.createUpdateTask(resourceId, newVersion, user, system, properties);
    // The upload logic of the material update is completed by the versionService.updateVersion method
    versionService.updateVersion(resourceTask.getResourceId(), user, file, properties);

    Inside the versionService.updateVersion method, the main functions implemented are:

    ResourceHelper resourceHelper = ResourceHelperFactory.getResourceHelper();
    InputStream inputStream = file.getInputStream();
    // Get the path of the resource
    String newVersion = params.get("newVersion").toString();
    String path = versionDao.getResourcePath(resourceId) + "_" + newVersion;
    // getResourcePath fetches a single original path and then appends newVersion to it with "_"
    // select resource from linkis_ps_bml_resources_version WHERE resource_id = #{resourceId} limit 1
    // Upload the resource to HDFS or the local file system
    StringBuilder stringBuilder = new StringBuilder();
    long size = resourceHelper.upload(path, user, inputStream, stringBuilder, OVER_WRITE);
    // Finally insert a new resource version record into the linkis_ps_bml_resources_version table
    ResourceVersion resourceVersion = ResourceVersion.createNewResourceVersion(resourceId, path, md5String, clientIp, size, newVersion, 1);
    versionDao.insertNewVersion(resourceVersion);
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/bml/overview/index.html b/docs/latest/architecture/public_enhancement_services/bml/overview/index.html index 07cdc2f1541..ccbca3870a2 100644 --- a/docs/latest/architecture/public_enhancement_services/bml/overview/index.html +++ b/docs/latest/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -18,7 +18,7 @@ The number of bytes. After the reading is successful, the stream information is returned to the user.

  • Insert a successful download record in resource_download_history

  • Database Design#

    1. Resource information table (resource)

    Field name | Function | Remarks
    resource_id | A string that uniquely identifies a resource globally | A UUID can be used as the identifier
    resource_location | The location where the resource is stored | For example, hdfs:///tmp/bdp/${USERNAME}/
    owner | The owner of the resource | e.g. zhangsan
    create_time | Record creation time |
    is_share | Whether the resource is shared | 0 means not shared, 1 means shared
    update_time | Last update time of the resource |
    is_expire | Whether the resource has expired |
    expire_time | Resource expiration time |

    2. Resource version information table (resource_version)

    Field name | Function | Remarks
    resource_id | Uniquely identifies the resource | Part of the joint primary key
    version | The version of the resource file |
    start_byte | Start byte of the resource file |
    end_byte | End byte of the resource file |
    size | Resource file size |
    resource_location | Where the resource file is stored |
    start_time | Upload start time |
    end_time | Upload end time |
    updater | The user who updated the record |

    3. Resource download history table (resource_download_history)

    Field | Function | Remarks
    resource_id | The resource_id of the downloaded resource |
    version | The version of the downloaded resource |
    downloader | The user who downloaded the resource |
    start_time | Download start time |
    end_time | Download end time |
    status | Whether the download succeeded | 0 means success, 1 means failure
    err_msg | Failure reason | null means success, otherwise records the failure reason
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html index 726643cc82f..b2ea9aa23ac 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS Architecture | Apache Linkis - + @@ -17,7 +17,7 @@

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 64272f2bb9d..2da60a4024b 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache Architecture | Apache Linkis - + @@ -16,7 +16,7 @@

    Note: The ContextIDValueGenerator will go to the persistence layer to pull the Array[ContextKeyValue] of the ContextID, and parse the ContextKeyValue key storage index and content through ContextKeyValueParser.

    The other interface processes provided by ContextCacheService are similar, so I won't repeat them here.

    KeyWord parsing logic#

    The concrete entity bean of a ContextValue needs to use the annotation @keywordMethod on each get method whose value can serve as a keyword. For example, the getTableName method of Table must be annotated with @keywordMethod.
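
    A minimal sketch of this convention (the annotation comes from the cs core module per the text; its exact casing and the class internals here are assumptions for illustration):

    public class Table {
        private String tableName;

        @KeywordMethod // the parser invokes this no-arg getter and indexes its toString() result as a keyword
        public String getTableName() {
            return tableName;
        }
    }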

    When ContextKeyValueParser parses a ContextKeyValue, it scans all methods of the passed-in object that carry the keyword annotation, calls each get method, and takes the toString of the returned object; the result is parsed through user-selectable rules and stored in the keyword collection. The rules include separators and regular expressions.

    Precautions:

    1. The annotation is defined in the cs core module

    2. The annotated get method cannot take parameters

    3. The toString method of the object returned by the get method must return the keyword

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html index 5f16219116d..2f55a6e2fae 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client Design | Apache Linkis - + @@ -17,7 +17,7 @@ The second case is that the content of the ContextID is carried. We need to parse the CSID; the parsing method is to obtain the information of each instance by string splitting, and then use Eureka to determine, based on the instance information, whether this microservice still exists. If it exists, the request is sent to this microservice instance

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 41a012db367..38ca7778d43 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA Design | Apache Linkis - + @@ -18,7 +18,7 @@ The client sends a request, and the Gateway forwards it to any server. The HA module generates the HAID, including the main instance, the backup instance and the CSID, and completes the binding of the workflow and the HAID.

    When the client sends a change request, the Gateway determines that the main instance is invalid and forwards the request to the standby instance for processing. After the service on the standby instance verifies that the HAID is valid, it loads the ContextID instance and processes the request.

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 2b04c13e70e..39bba18a433 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    CS Listener Architecture

    Listener Architecture#

    In DSS, when a node changes its metadata information, the context information of the entire workflow changes. We expect all nodes to perceive the change and update their metadata automatically. This is implemented with a listener pattern, and a heartbeat mechanism is used to poll and maintain the metadata consistency of the context information.

    Client registration itself, CSKey registration and CSKey update process#

    The main process is as follows:

    1. Registration operation: the clients client1, client2, client3, and client4 register themselves and the CSKeys they want to monitor with the CSServer through HTTP requests. The Service obtains the callback engine instance through the external interface and registers the clients and their corresponding CSKeys.

    2. Update operation: If the ClientX node updates the CSKey content, the Service service updates the CSKey cached by the ContextCache, and the ContextCache delivers the update operation to the ListenerBus. The ListenerBus notifies the specific listener to consume (that is, the ContextKeyCallbackEngine updates the CSKeys corresponding to the Client). The consumed event will be automatically removed.

    3. Heartbeat mechanism:

    All clients use heartbeat information to detect whether the value of CSKeys in ContextKeyCallbackEngine has changed.

    ContextKeyCallbackEngine returns the updated CSKeys value to all registered clients through the heartbeat mechanism. If there is a client's heartbeat timeout, remove the client.

    Listener UML class diagram#

    Interface: ListenerManager

    External: Provide ListenerBus for event delivery.

    Internally: provide a callback engine for specific event registration, access, update, and heartbeat processing logic

    Listener callbackengine timing diagram#

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 88a09657e1a..680a9d3f1a7 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html b/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html index 897219cc9a6..db4c4deb89a 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    CS Search Architecture

    CSSearch Architecture#

    Overall architecture#

    As shown below:

    1. ContextSearch: The query entry, accepts the query conditions defined in the Map form, and returns the corresponding results according to the conditions.

    2. Building module: Each condition type corresponds to a Parser, which is responsible for converting the condition in the form of Map into a Condition object, which is implemented by calling the logic of ConditionBuilder. Conditions with complex logical relationships will use ConditionOptimizer to optimize query plans based on cost-based algorithms.

    3. Execution module: filters the results that match the conditions out of the Cache. According to the query target, there are three execution modes: Ruler, Fetcher, and Matcher. The specific logic is described later.

    4. Evaluation module: Responsible for calculation of conditional execution cost and statistics of historical execution status.

    Query Condition Definition (ContextSearchCondition)#

    A query condition specifies how to filter out the part that meets the condition from a ContextKeyValue collection. The query conditions can be used to form more complex query conditions through logical operations.

    1. Support ContextType, ContextScope, KeyWord matching

      1. Corresponding to a Condition type

      2. In Cache, these should have corresponding indexes

    2. Support contains/regex matching mode for key

      1. ContainsContextSearchCondition: contains a string

      2. RegexContextSearchCondition: match a regular expression

    3. Support the logical operations or, and, and not

      1. Unary operation UnaryContextSearchCondition:

    Support logical operations of a single parameter, such as NotContextSearchCondition

    2. Binary operation BinaryContextSearchCondition:

    Support the logical operation of two parameters, defined as LeftCondition and RightCondition, such as OrContextSearchCondition and AndContextSearchCondition

    3. Each logical operation corresponds to an implementation class of the above subclasses

    4. The UML class diagram of this part is as follows:

    Construction of query conditions#

    1. Support construction through ContextSearchConditionBuilder: When constructing, if multiple ContextType, ContextScope, KeyWord, contains/regex matches are declared at the same time, they will be automatically connected by And logical operation

    2. Support logical operations between Conditions that return new Conditions: And, Or, and Not (considering the condition1.or(condition2) form, the top-level Condition interface is required to define the logical operation methods) — see the sketch after this list

    3. Support to build from Map through ContextSearchParser corresponding to each underlying implementation class
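
    A hypothetical usage sketch of the API described above (only the class names ContainsContextSearchCondition and RegexContextSearchCondition and the or() composition come from the text; constructor arguments and everything else are assumptions):

    // build two simple conditions on the key
    Condition byKeyword = new ContainsContextSearchCondition("result_set"); // key contains a string
    Condition byPattern = new RegexContextSearchCondition("tmp_.*");        // key matches a regex
    // logical composition returns a new Condition
    Condition combined = byKeyword.or(byPattern);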

    Execution of query conditions#

    1. Three function modes of query conditions:

      1. Ruler: Filter out eligible ContextKeyValue sub-Arrays from an Array

      2. Matcher: Determine whether a single ContextKeyValue meets the conditions

      3. Fetcher: Filter out an Array of eligible ContextKeyValue from ContextCache

    2. Each bottom-level Condition has a corresponding Execution, responsible for maintaining the corresponding Ruler, Matcher, and Fetcher.

    Query entry ContextSearch#

    Provide a search interface, receive Map as a parameter, and filter out the corresponding data from the Cache.

    1. Use Parser to convert the condition in the form of Map into a Condition object

    2. Obtain cost information through Optimizer, and determine the order of query according to the cost information

    3. After executing the corresponding Ruler/Fetcher/Matcher logic through the corresponding Execution, the search result is obtained

    Query Optimization#

    1. OptimizedContextSearchCondition maintains the Cost and Statistics information of the condition:

      1. Cost information: CostCalculator is responsible for judging whether a certain Condition can calculate Cost, and if it can be calculated, it returns the corresponding Cost object

      2. Statistics information: start/end/execution time, number of input lines, number of output lines

    2. Implement a CostContextSearchOptimizer, whose optimize method is based on the cost of the Condition to optimize the Condition and convert it into an OptimizedContextSearchCondition object. The specific logic is described as follows:

      1. Disassemble a complex Condition into a tree structure based on the combination of logical operations. Each leaf node is a basic simple Condition; each non-leaf node is a logical operation.

    Tree A as shown in the figure below is a complex condition composed of five simple conditions of ABCDE through various logical operations.

    (Tree A)
    2. The execution of these Conditions is actually depth-first, traversing the tree from left to right. Moreover, according to the commutativity of the logical operations, the left-right order of a node's children in the Condition tree can be swapped, so all possible trees in all possible execution orders can be enumerated.

    Tree B as shown in the figure below is another possible sequence of tree A above, which is exactly the same as the execution result of tree A, except that the execution order of each part has been adjusted.

    (Tree B)
    3. For each tree, the cost is calculated from the leaf nodes and accumulated up to the root node, which gives the final cost of the tree; the tree with the smallest cost is then chosen as the optimal execution order.

    The rules for calculating node cost are as follows:

    1. For leaf nodes, each node has two attributes: Cost and Weight. Cost is the cost calculated by CostCalculator. Weight is assigned according to the execution order of the nodes; the current default is 1 for the left child and 0.5 for the right child, and how to adjust this will be considered later. (The reason for assigning weights is that in some cases the condition on the left alone can already determine whether the entire combined logic matches, so the condition on the right does not have to be executed in all cases, and its actual cost needs to be discounted by a certain percentage.)

    2. For non-leaf nodes, Cost = the sum of Cost × Weight over all child nodes; the weight assignment logic is consistent with that of leaf nodes. (A worked instance follows.)
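
    As a minimal worked instance of these two rules (the numbers are illustrative, not taken from the document's figures): for a node whose left child is a leaf with Cost 10 and whose right child is a leaf with Cost 100,

    Cost(node) = 10 × 1 + 100 × 0.5 = 60

    whereas swapping the two children would give 100 × 1 + 10 × 0.5 = 105, so the optimizer prefers placing the cheaper condition on the left.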

    Taking tree A and tree B as examples, the costs of these two trees are calculated respectively, as shown in the figure below, where the number in each node is Cost|Weight, assuming that the costs of the five simple conditions A, B, C, D, and E are 10, 100, 50, 10, and 100. It can be concluded that the cost of tree B is less than that of tree A, so tree B is the better plan.

    1. Use CostCalculator to measure the cost of simple conditions:

      1. The condition acting on the index: the cost is determined according to the distribution of the index value. For example, when the length of the Array obtained by condition A from the Cache is 100 and condition B is 200, then the cost of condition A is less than B.

      2. Conditions that need to be traversed:

        1. According to the matching mode of the condition itself, an initial Cost is given: for example, Regex is 100, Contains is 10, etc. (the specific values will be adjusted as appropriate during implementation)

        2. According to the efficiency of historical queries, the real-time Cost is obtained by continuous adjustment on the basis of the initial Cost, i.e., the throughput per unit time

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html b/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html index 24431e8e4a6..6c71bdfa129 100644 --- a/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html +++ b/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -22,7 +22,7 @@ Enter Persistence architecture design

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html b/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html index 7256017d1ad..1c180efd80d 100644 --- a/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html +++ b/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Data Source Management Service Architecture

    Background#

    Earlier versions of both Exchangis0.x and Linkis0.x had integrated data source modules. In order to reuse the data source management capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly involves the DataSource Manager Server data source management service, which provides the following functions:

    1) Unified Linkis service startup and deployment, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

    2) Management services with a graphical interface through Linkis Web. The interface provides management services such as new data source creation, data source query, data source update, and connectivity tests;

    3) The service is stateless and deployed with multiple instances for high availability. When the system is deployed, multiple instances can serve the outside world independently without interfering with each other, and all information is stored in the database for sharing.

    4) Full life cycle management of data sources, including creation, query, update, test, and expiration management.

    5) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

    6) Functions provided by the Restful interface, in detail: data source type query, data source detail query, data source query by version, data source version query, data source parameter list retrieval, multi-dimensional data source search, data source environment query and update, data source creation, data source parameter configuration, data source expiration setting, and data source connectivity test.

    Architecture Diagram#

    datasource Architecture diagram

    Architecture Description#

    1) The service is registered with the Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can access the data source management service by connecting to the Linkis-GateWay-Service using the service name data-source-manager.

    2) The interface layer serves other applications through the Restful interface, providing create, delete, and update operations on data sources and data source environments, data source link and dual-link tests, and data source version management and expiration operations;

    3) The Service layer mainly manages the database and the material library, persistently storing the relevant information of data sources;

    4) Data source connectivity tests are performed through the linkis metastore server service, which currently supports mysql, es, kafka, and hive.

    Core Process#

    1) To create a new data source, the requesting user is first obtained from the request and checked for validity. The relevant fields of the data source are then verified: the data source name and data source type cannot be empty. The data source name is used to check whether the data source already exists; if it does not, it is inserted into the database and the data source ID is returned (a request sketch follows these steps).

    2) To update a data source, the requesting user is first obtained from the request and checked for validity, and the relevant fields of the data source are verified: the data source name and data source type cannot be empty. Whether the data source exists is confirmed by its ID; if it does not exist, an exception is returned. If it exists, it is further checked whether the user has update permission: only the administrator or the owner of the data source may update it. If permitted, the data source is updated and its ID is returned.

    3) To update data source parameters, the requesting user is first obtained from the request and checked for validity. The detailed data source information is fetched by the data source ID passed in, and it is then checked whether the user is the owner of the data source or an administrator. If so, the modified parameters are further verified, the parameters are updated after validation, and the versionId is returned.
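
    An illustrative request sketch for step 1 (the path and payload fields here are assumptions based on the service name and description above, not a verified API reference; authentication headers required by the gateway are omitted):

    curl -X POST "http://<gateway-url>/api/rest_j/v1/data-source-manager/info/json" \
         -H "Content-Type: application/json" \
         -d '{"dataSourceName": "my_mysql", "dataSourceTypeId": 1, "createSystem": "Linkis"}'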

    Entity Object#

    Class Name | Description
    DataSourceType | Indicates the type of a data source
    DataSourceParamKeyDefinition | Declares data source property configuration definitions
    DataSource | Data source object entity class, including permission tags and attribute configuration definitions
    DataSourceEnv | Data source environment object entity class, which also contains attribute configuration definitions
    DataSourceParameter | Data source specific parameter configuration
    DatasourceVersion | Data source version details

    Database Design#

    Database Diagram:#

    Data Table Definition:#

    Table: linkis_ps_dm_datasource <--> Object: DataSource

    Serial Number | Column | Description
    1 | id | Data source ID
    2 | datasource_name | Data source name
    3 | datasource_desc | Data source detailed description
    4 | datasource_type_id | Data source type ID
    5 | create_identify | Create identify
    6 | create_system | System that created the data source
    7 | parameter | Data source parameters
    8 | create_time | Data source creation time
    9 | modify_time | Data source modification time
    10 | create_user | Data source create user
    11 | modify_user | Data source modify user
    12 | labels | Data source label
    13 | version_id | Data source version ID
    14 | expire | Whether the data source has expired
    15 | published_version_id | Data source published version number

    Table: linkis_ps_dm_datasource_type <--> Object: DataSourceType

    Serial Number | Column | Description
    1 | id | Data source type ID
    2 | name | Data source type name
    3 | description | Data source type description
    4 | option | Type of data source
    5 | classifier | Data source type classifier
    6 | icon | Data source image display path
    7 | layers | Data source type hierarchy

    Table: linkis_ps_dm_datasource_env <--> Object: DataSourceEnv

    Serial Number | Column | Description
    1 | id | Data source environment ID
    2 | env_name | Data source environment name
    3 | env_desc | Data source environment description
    4 | datasource_type_id | Data source type ID
    5 | parameter | Data source environment parameters
    6 | create_time | Data source environment creation time
    7 | create_user | Data source environment create user
    8 | modify_time | Data source modification time
    9 | modify_user | Data source modify user

    Table: linkis_ps_dm_datasource_type_key <--> Object: DataSourceParamKeyDefinition

    Serial Number | Column | Description
    1 | id | Key-value type ID
    2 | data_source_type_id | Data source type ID
    3 | key | Data source parameter key
    4 | name | Data source parameter name
    5 | default_value | Data source parameter default value
    6 | value_type | Data source parameter type
    7 | scope | Data source parameter scope
    8 | require | Whether the data source parameter is required
    9 | description | Data source parameter description
    10 | value_regex | Regex for the data source parameter
    11 | ref_id | Data source parameter association ID
    12 | ref_value | Data source parameter associated value
    13 | data_source | Data source
    14 | update_time | Update time
    15 | create_time | Create time

    Table: linkis_ps_dm_datasource_version <--> Object: DatasourceVersion

    Serial Number | Column | Description
    1 | version_id | Data source version ID
    2 | datasource_id | Data source ID
    3 | parameter | The version parameters of the data source
    4 | comment | Comment
    5 | create_time | Create time
    6 | create_user | Create user
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html b/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html index 403e59bafed..3906f216620 100644 --- a/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html +++ b/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ Data Source Management Service Architecture | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Data Source Management Service Architecture

    Background#

    Earlier versions of both Exchangis0.x and Linkis0.x had integrated data source modules. In order to reuse the data source management capability, Linkis reconstructed the data source module based on linkis-datasource (refer to the related documents) and split data source management into a data source management service and a metadata management service.

    This article mainly involves the MetaData Manager Server data source management service, which provides the following functions:

    1) Unified Linkis service startup and deployment, which does not increase operation and maintenance costs and reuses Linkis service capabilities;

    2) The service is stateless and deployed with multiple instances for high availability. When the system is deployed, multiple instances can serve the outside world independently without interfering with each other, and all information is stored in the database for sharing.

    3) Full life cycle management of data sources, including creation, query, update, test, and expiration management.

    4) Multi-version data source management: historical data sources are saved in the database, and data source expiration management is provided.

    5) Functions provided by the Restful interface, in detail: database information query, database table information query, database table parameter information query, and data partition information query.

    Architecture Diagram#

    Data Source Architecture Diagram

    Architecture Description#

    1) The service is registered with the Linkis-Eureka-Service and managed in a unified manner with the other Linkis microservices. Clients can access the metadata management service by connecting to the Linkis-GateWay-Service using the service name metamanager.

    2) The interface layer provides database/table/partition information queries to other applications through the Restful interface;

    3) In the Service layer, the data source type is obtained from the data source management service by data source ID, and the concrete supporting service is selected by that type. The types supported initially are mysql, es, kafka, and hive;

    Core Process#

    1) The client passes in a specified data source ID and obtains information through the restful interface. For example, to query the database list for data source ID 1, the URL is http://<meta-server-url>/metadatamanager/dbs/1 (see the curl sketch after these steps);

    2) According to the data source ID, the data source type is obtained from the data source service <data-source-manager> through RPC;

    3) According to the data source type, the corresponding Service [hive/es/kafka/mysql] is loaded, the corresponding operation is performed, and the result is returned;
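
    A minimal curl sketch of the step 1 request, using the URL given above (any authentication headers required by your gateway deployment are omitted and would need to be added):

    curl -X GET "http://<meta-server-url>/metadatamanager/dbs/1"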

    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/overview/index.html b/docs/latest/architecture/public_enhancement_services/overview/index.html index a440a267e46..ea8df236a2c 100644 --- a/docs/latest/architecture/public_enhancement_services/overview/index.html +++ b/docs/latest/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    PublicEnhancementService (PS) architecture design

    PublicEnhancementService (PS): public enhancement service, a module that provides functions such as unified configuration management, context service, material library, data source management, microservice management, and historical task query for the other microservice modules.

    Introduction to the second-level module:

    BML material library#

    It is the linkis material management system, which is mainly used to store various file data of users, including user scripts, resource files, third-party Jar packages, etc., and can also store class libraries that need to be used when the engine runs.

    Core Class | Core Function
    UploadService | Provides the resource upload service
    DownloadService | Provides the resource download service
    ResourceManager | Provides a unified management entry for uploading and downloading resources
    VersionManager | Provides resource version marking and version management functions
    ProjectManager | Provides project-level resource management and control capabilities

    Unified configuration management#

    Configuration provides a "user-engine-application" three-level configuration management solution, which provides users with the function of configuring custom engine parameters under various access applications.

    Core Class | Core Function
    CategoryService | Provides management services for application and engine catalogs
    ConfigurationService | Provides a unified management service for user configuration

    ContextService context service#

    ContextService is used to solve the problem of data and information sharing across multiple systems in a data application development process.

    Core Class | Core Function
    ContextCacheService | Provides a cache service for context information
    ContextClient | Provides the ability for other microservices to interact with the CSServer group
    ContextHAManager | Provides high-availability capabilities for ContextService
    ListenerManager | Provides a message bus
    ContextSearch | Provides the query entry
    ContextService | Implements the overall execution logic of the context service

    Datasource data source management#

    Datasource provides the ability to connect to different data sources for other microservices.

    Core Class | Core Function
    datasource-server | Provides the ability to connect to different data sources

    InstanceLabel microservice management#

    InstanceLabel provides registration and labeling functions for other microservices connected to linkis.

    Core Class | Core Function
    InsLabelService | Provides microservice registration and label management functions

    Jobhistory historical task management#

    Jobhistory provides users with query, progress, and log display functions for historical Linkis tasks, and provides a unified historical task view for administrators.

    Core Class | Core Function
    JobHistoryQueryService | Provides the historical task query service

    Variable user-defined variable management#

    Variable provides users with functions related to the storage and use of custom variables.

    Core Class | Core Function
    VariableService | Provides functions related to the storage and use of custom variables

    UDF user-defined function management#

    UDF provides users with the function of custom functions, which can be introduced by users when writing code.

    Core Class | Core Function
    UDFService | Provides the user-defined function service
    - + \ No newline at end of file diff --git a/docs/latest/architecture/public_enhancement_services/public_service/index.html b/docs/latest/architecture/public_enhancement_services/public_service/index.html index bf4874acebf..20d8c33439f 100644 --- a/docs/latest/architecture/public_enhancement_services/public_service/index.html +++ b/docs/latest/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ Public Service | Apache Linkis - + @@ -20,7 +20,7 @@ The main functions are as follows:

    • Provides resource management capabilities for some specific labels to assist RM in more refined resource management.

    • Provides labeling capabilities for users. The user label will be automatically added for judgment when applying for the engine.

  • Provides the label analysis module, which can parse a user's request into a set of labels.

  • Provides node label management capability, mainly used to provide CRUD operations on node labels and label resource management, recording the maximum resource, minimum resource, and used resource of a label.

    - + \ No newline at end of file diff --git a/docs/latest/deployment/cluster_deployment/index.html b/docs/latest/deployment/cluster_deployment/index.html index 38be6f02be4..d6ec1a6c233 100644 --- a/docs/latest/deployment/cluster_deployment/index.html +++ b/docs/latest/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ Cluster Deployment | Apache Linkis - + @@ -26,7 +26,7 @@ Linux clear process sudo kill - 9 process number

    4. matters needing attention#

    4.1 It is best to start all services at the beginning, because there are dependencies between services. If some services are missing and no corresponding backup can be found through Eureka, dependent services will fail to start; a failed service does not restart automatically. Wait until the alternative service is available before shutting down the related services#

    - + \ No newline at end of file diff --git a/docs/latest/deployment/deploy_linkis_without_hdfs/index.html b/docs/latest/deployment/deploy_linkis_without_hdfs/index.html index 37ac2fa0d95..03d7cd9e9c8 100644 --- a/docs/latest/deployment/deploy_linkis_without_hdfs/index.html +++ b/docs/latest/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Deploy Linkis without HDFS | Apache Linkis - + @@ -20,7 +20,7 @@ [INFO] Retrieving result-set, may take time if result-set is large, please do not exit program.============ RESULT SET 1 ============hello ############Execute Success!!!########
    - + \ No newline at end of file diff --git a/docs/latest/deployment/engine_conn_plugin_installation/index.html b/docs/latest/deployment/engine_conn_plugin_installation/index.html index 24bb1af2fc4..56fe428fab7 100644 --- a/docs/latest/deployment/engine_conn_plugin_installation/index.html +++ b/docs/latest/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ EngineConnPlugin Installation | Apache Linkis - + @@ -17,7 +17,7 @@ wds.linkis.engineconn.plugin.loader.store.path, which is used by EngineConnPluginServer to read the actual implementation Jar of the engine.

    It is highly recommended to specify wds.linkis.engineconn.home and wds.linkis.engineconn.plugin.loader.store.path as the same directory, so that the engine ZIP package exported by maven can be unzipped directly into it, for example placed under the ${LINKIS_HOME}/lib/linkis-engineconn-plugins directory:

    ${LINKIS_HOME}/lib/linkis-engineconn-plugins:
    └── hive
        └── dist
        └── plugin
    └── spark
        └── dist
        └── plugin

    If the two parameters do not point to the same directory, you need to place the dist and plugin directories separately, as shown in the following example:

    ## dist directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/dist:
    └── hive
        └── dist
    └── spark
        └── dist

    ## plugin directory
    ${LINKIS_HOME}/lib/linkis-engineconn-plugins/plugin:
    └── hive
        └── plugin
    └── spark
        └── plugin

    2.2 Configuration modification of management console (optional)#

    The configuration of the Linkis1.0 management console is managed by engine label. If the new engine has configuration parameters, you need to insert the corresponding configuration parameters into the Configuration, specifically into the following tables (an illustrative SQL sketch follows the list):

    linkis_configuration_config_key: insert the keys and default values of the engine's configuration parameters
    linkis_manager_label: insert the engine label, such as hive-1.2.1
    linkis_configuration_category: insert the catalog relationship of the engine
    linkis_configuration_config_value: insert the configuration that the engine needs to display
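
    For example, registering a new configuration key might look roughly like the following (the column names are assumptions for illustration; the authoritative schema is defined in linkis_ddl.sql):

    -- illustrative only; check linkis_ddl.sql for the real column set
    INSERT INTO linkis_configuration_config_key (`key`, description, default_value, engine_conn_type)
    VALUES ('spark.executor.instances', 'Number of Spark executors', '2', 'spark');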

    If it is an existing engine and a new version is added, you can modify the version of the corresponding engine in the linkis_configuration_dml.sql file and execute it

    2.3 Engine refresh#

    1. The engine supports real-time refresh. After the engine is placed in the corresponding directory, Linkis1.0 provides a way to load the engine without restarting the server: just send a request to the linkis-engineconn-plugin-server service through its restful interface, that is, the actual deployed IP and port of the service. The request interface is http://ip:port/api/rest_j/v1/rpc/receiveAndReply, the request method is POST, and the request body is {"method":"/enginePlugin/engineConn/refreshAll"} (see the curl sketch after this list).

    2. Restart refresh: the engine directory can be forcibly refreshed by restarting

    ### cd to the sbin directory, restart linkis-engineconn-plugin-server
    cd /Linkis1.0.0/sbin
    ## Execute the linkis-daemon script
    sh linkis-daemon.sh restart linkis-engine-plugin-server

    3. Check whether the engine refresh succeeded: if you encounter problems during the refresh and need to confirm whether it succeeded, check whether the last_update_time of the linkis_engine_conn_plugin_bml_resources table in the database is the time at which the refresh was triggered.
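
    A minimal curl sketch of the refresh request from step 1 (replace ip:port with the actual deployment address of the linkis-engineconn-plugin-server service):

    curl -X POST "http://ip:port/api/rest_j/v1/rpc/receiveAndReply" \
         -H "Content-Type: application/json" \
         -d '{"method": "/enginePlugin/engineConn/refreshAll"}'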

    - + \ No newline at end of file diff --git a/docs/latest/deployment/installation_hierarchical_structure/index.html b/docs/latest/deployment/installation_hierarchical_structure/index.html index 0182b45ce20..e176c25d391 100644 --- a/docs/latest/deployment/installation_hierarchical_structure/index.html +++ b/docs/latest/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Installation Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Installation directory structure

    The directory structure of Linkis 1.0 is very different from that of 0.X. In 0.X, each microservice had its own independent root directory. The main advantage of that structure is that microservices are easy to distinguish and can be managed individually, but it has some obvious problems:

    1. There are too many microservice directories, which makes switching between them inconvenient to manage
    2. There is no unified startup script, which makes starting and stopping microservices troublesome
    3. There is a large amount of duplicated service configuration, and the same configuration often needs to be modified in many places
    4. There is a large number of duplicated Lib dependencies, which increases the size of the installation package and the risk of dependency conflicts

    Therefore, in Linkis 1.0, we greatly optimized and adjusted the installation directory structure, reducing the number of microservice directories, removing duplicated jar dependencies, and reusing configuration files and microservice management scripts as much as possible. This is mainly reflected in the following aspects:

    1. The bin folder is no longer provided per microservice; it is shared by all microservices.

    The bin folder has become the installation directory, mainly used to install Linkis 1.0 and check the environment status. The new sbin directory provides one-click start and stop for all of Linkis, and independent start and stop for individual microservices by changing parameters.

    2. A separate conf directory is no longer provided per microservice; it is shared by all microservices.

    The conf folder contains two kinds of content: on the one hand, configuration information shared by all microservices, which users can customize according to their own environment; on the other hand, the special configuration of each microservice, which under normal circumstances users do not need to change.

    3. The lib folder is no longer provided per microservice; it is shared by all microservices.

    The lib folder likewise contains two kinds of content: on the one hand, the common dependencies required by all microservices; on the other hand, the special dependencies required by each microservice.

    4. The log directory is no longer provided per microservice; it is shared by all microservices.

    The log directory contains the log files of all microservices.

    The simplified directory structure of Linkis 1.0 is as follows.

    ├── bin ──installation directory
    │ ├── checkEnv.sh ── Environment variable detection
    │ ├── checkServices.sh ── Microservice status check
    │ ├── common.sh ── Some public shell functions
    │ ├── install-io.sh ── Used for dependency replacement during installation
    │ └── install.sh ── Main script of the Linkis installation
    ├── conf ──configuration directory
    │ ├── application-eureka.yml
    │ ├── application-linkis.yml ──Microservice general yml
    │ ├── linkis-cg-engineconnmanager-io.properties
    │ ├── linkis-cg-engineconnmanager.properties
    │ ├── linkis-cg-engineplugin.properties
    │ ├── linkis-cg-entrance.properties
    │ ├── linkis-cg-linkismanager.properties
    │ ├── linkis-computation-governance
    │ │   └── linkis-client
    │ │       └── linkis-cli
    │ │           ├── linkis-cli.properties
    │ │           └── log4j2.xml
    │ ├── linkis-env.sh ──linkis environment properties
    │ ├── linkis-et-validator.properties
    │ ├── linkis-mg-gateway.properties
    │ ├── linkis.properties ──linkis global properties
    │ ├── linkis-ps-bml.properties
    │ ├── linkis-ps-cs.properties
    │ ├── linkis-ps-datasource.properties
    │ ├── linkis-ps-publicservice.properties
    │ ├── log4j2.xml
    │ ├── proxy.properties (optional)
    │ └── token.properties (optional)
    ├── db ──database DML and DDL file directory
    │ ├── linkis_ddl.sql ──Database table definition SQL
    │ ├── linkis_dml.sql ──Database table initialization SQL
    │ └── module ──Contains the DML and DDL files of each microservice
    ├── lib ──lib directory
    │ ├── linkis-commons ──Common dependency packages
    │ ├── linkis-computation-governance ──The lib directory of the computation governance module
    │ ├── linkis-engineconn-plugins ──The lib directory of all EngineConnPlugins
    │ ├── linkis-public-enhancements ──The lib directory of the public enhancement services
    │ └── linkis-spring-cloud-services ──The SpringCloud lib directory
    ├── logs ──log directory
    │ ├── linkis-cg-engineconnmanager-gc.log
    │ ├── linkis-cg-engineconnmanager.log
    │ ├── linkis-cg-engineconnmanager.out
    │ ├── linkis-cg-engineplugin-gc.log
    │ ├── linkis-cg-engineplugin.log
    │ ├── linkis-cg-engineplugin.out
    │ ├── linkis-cg-entrance-gc.log
    │ ├── linkis-cg-entrance.log
    │ ├── linkis-cg-entrance.out
    │ ├── linkis-cg-linkismanager-gc.log
    │ ├── linkis-cg-linkismanager.log
    │ ├── linkis-cg-linkismanager.out
    │ ├── linkis-et-validator-gc.log
    │ ├── linkis-et-validator.log
    │ ├── linkis-et-validator.out
    │ ├── linkis-mg-eureka-gc.log
    │ ├── linkis-mg-eureka.log
    │ ├── linkis-mg-eureka.out
    │ ├── linkis-mg-gateway-gc.log
    │ ├── linkis-mg-gateway.log
    │ ├── linkis-mg-gateway.out
    │ ├── linkis-ps-bml-gc.log
    │ ├── linkis-ps-bml.log
    │ ├── linkis-ps-bml.out
    │ ├── linkis-ps-cs-gc.log
    │ ├── linkis-ps-cs.log
    │ ├── linkis-ps-cs.out
    │ ├── linkis-ps-datasource-gc.log
    │ ├── linkis-ps-datasource.log
    │ ├── linkis-ps-datasource.out
    │ ├── linkis-ps-publicservice-gc.log
    │ ├── linkis-ps-publicservice.log
    │ └── linkis-ps-publicservice.out
    ├── pid ──Process IDs of all microservices
    │ ├── linkis_cg-engineconnmanager.pid ──EngineConnManager microservice
    │ ├── linkis_cg-engineconnplugin.pid ──EngineConnPlugin microservice
    │ ├── linkis_cg-entrance.pid ──Engine entrance microservice
    │ ├── linkis_cg-linkismanager.pid ──linkis manager microservice
    │ ├── linkis_mg-eureka.pid ──eureka microservice
    │ ├── linkis_mg-gateway.pid ──gateway microservice
    │ ├── linkis_ps-bml.pid ──material library microservice
    │ ├── linkis_ps-cs.pid ──Context microservice
    │ ├── linkis_ps-datasource.pid ──Data source microservice
    │ └── linkis_ps-publicservice.pid ──public microservice
    └── sbin ──microservice start and stop script directory
        ├── ext ──Start and stop script directory of each microservice
        ├── linkis-daemon.sh ── Quickly start, stop, or restart a single microservice
        ├── linkis-start-all.sh ── Start all microservices with one click
        └── linkis-stop-all.sh ── Stop all microservices with one click

    Configuration item modification

    After executing install.sh in the bin directory to complete the Linkis installation, you need to modify the configuration items. All configuration items are located in the conf directory. Normally, you need to modify the three configuration files db.sh, linkis.properties, and linkis-env.sh. For project installation and configuration documentation, please refer to the article "Linkis1.0 Installation".
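    As a minimal sketch, the typical post-install edits look like the following (file locations may vary by version; in recent packages db.sh sits in the installer's deploy-config directory rather than conf):

    vi conf/linkis-env.sh       # deployment environment variables
    vi conf/linkis.properties   # global linkis properties
    vi deploy-config/db.sh      # database connection information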

    Microservice start and stop

    After modifying the configuration items, you can start the microservice in the sbin directory. The names of all microservices are as follows:

    ├── linkis-cg-engineconnmanager ── engine management service
    ├── linkis-cg-engineplugin ── EngineConnPlugin management service
    ├── linkis-cg-entrance ── computation governance entrance service
    ├── linkis-cg-linkismanager ── computation governance management service
    ├── linkis-mg-eureka ── microservice registry service
    ├── linkis-mg-gateway ── Linkis gateway service
    ├── linkis-ps-bml ── material library service
    ├── linkis-ps-cs ── context service
    ├── linkis-ps-datasource ── data source service
    └── linkis-ps-publicservice ── public service

    Microservice abbreviation:

    Abbreviation | Full English Name | Full Chinese Name
    cg | Computation Governance | Computing Governance
    mg | Microservice Governance | Microservice Governance
    ps | Public Enhancement Service | Public Enhancement Service

    In the past, starting and stopping a single microservice required entering each microservice's bin directory and executing its start/stop script. With many microservices, this was cumbersome and added a lot of directory switching. Linkis 1.0 places all scripts related to starting and stopping microservices in the sbin directory, so only a single entry script needs to be executed.

    Under the Linkis/sbin directory:

    1.Start all microservices at once:

    sh linkis-start-all.sh

    2.Shut down all microservices at once

    sh linkis-stop-all.sh

    3.Start a single microservice (use the service name with the linkis- prefix removed, such as mg-eureka)

    sh linkis-daemon.sh start service-name

    For example:

    sh linkis-daemon.sh start mg-eureka

    4.Shut down a single microservice

    sh linkis-daemon.sh stop service-name

    For example:

    sh linkis-daemon.sh stop mg-eureka

    5.Restart a single microservice

    sh linkis-daemon.sh restart service-name

    For example:

    sh linkis-daemon.sh restart mg-eureka

    6.View the status of a single microservice

    sh linkis-daemon.sh status service-name

    For example:

    sh linkis-daemon.sh status mg-eureka
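
    Because there is a single entry script, these checks also lend themselves to simple loops; a minimal convenience sketch, assuming LINKIS_HOME is set and using service names from the list above:

    cd ${LINKIS_HOME}/sbin
    for svc in mg-eureka mg-gateway cg-entrance cg-linkismanager; do
        sh linkis-daemon.sh status ${svc}
    done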
    - + \ No newline at end of file diff --git a/docs/latest/deployment/involve_skywalking_into_linkis/index.html b/docs/latest/deployment/involve_skywalking_into_linkis/index.html index 7f1b812bb6c..00cdf9c535a 100644 --- a/docs/latest/deployment/involve_skywalking_into_linkis/index.html +++ b/docs/latest/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ Involve SkyWaling into Linkis | Apache Linkis - + @@ -20,7 +20,7 @@

    Modify the configuration item SKYWALKING_AGENT_PATH in linkis-env.sh of Linkis. Set it to the path to skywalking-agent.jar.

    SKYWALKING_AGENT_PATH=/path/to/skywalking-agent.jar

    Then start Linkis.

    $ bash linkis-start-all.sh
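
    To verify that the agent was actually attached, a quick hedged check is to look for the agent jar in the running service processes (assuming a Linux or macOS shell):

    ps -ef | grep skywalking-agent.jar | grep -v grep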

    4. Result display#

    The Linkis UI starts on port 8080 by default. After Linkis enables SkyWalking, open the SkyWalking UI; if you can see the picture below, the integration succeeded.

    - + \ No newline at end of file diff --git a/docs/latest/deployment/linkis_scriptis_install/index.html b/docs/latest/deployment/linkis_scriptis_install/index.html index aa3cea12c54..50f122709b0 100644 --- a/docs/latest/deployment/linkis_scriptis_install/index.html +++ b/docs/latest/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ Installation and deployment of tool scriptis | Apache Linkis - + @@ -28,7 +28,7 @@

    After modifying the configuration, reload the nginx configuration

    sudo nginx -s reload

    Note the difference between root and alias in nginx

    • With root, the resolved path is: root path + location path
    • With alias, the location path is replaced by the alias path
    • alias defines a directory alias, while root defines the top-level directory (see the sketch below)
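
    A minimal sketch of the difference, with hypothetical paths:

    # With root:
    #   location /linkis/ { root /appcom/Install; }
    #   request /linkis/index.html   ->  /appcom/Install/linkis/index.html   (root + location)
    # With alias:
    #   location /scriptis/ { alias /appcom/Install/scriptis-web/dist/; }
    #   request /scriptis/index.html ->  /appcom/Install/scriptis-web/dist/index.html  (location prefix replaced)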

    4. scriptis Use steps#

    4.1 Log in to the linkis management console normally#

    #http://10.10.10.10:8080/#/
    http://nginxIp:port/#/

    Because scriptis requires login verification, you need to log in first to obtain the cookie.

    4.2 Visit the scriptis page after successful login#

    #http://10.10.10.10:8080/scriptis/
    http://nginxIp:port/scriptis/

    nginxIp: the nginx server IP; port: the port number configured for the linkis management console in nginx; scriptis: the location path configured in nginx for the static files of the scriptis project (customizable)

    4.3 use scriptis#

    Take creating an SQL query task as an example.

    step1 Create a new script

    (screenshot)

    step2 Enter the statement to query

    (screenshot)

    step3 Run the script

    (screenshot)

    step4 View the results

    (screenshot)

    - + \ No newline at end of file diff --git a/docs/latest/deployment/quick_deploy/index.html b/docs/latest/deployment/quick_deploy/index.html index b09fbd44ee1..187dfca439d 100644 --- a/docs/latest/deployment/quick_deploy/index.html +++ b/docs/latest/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ Quick Deployment | Apache Linkis - + @@ -21,7 +21,7 @@ ##:If your hive version is not 1.2.1, you need to modify the following parameter: #HIVE_VERSION=2.3.3

    f. Modify the database configuration#

    vi deploy-config/db.sh 
    # set the connection information of the database
    # including ip address, database's name, username and port
    # Mainly used to store user's customized variables, configuration parameters, UDFs, and small functions, and to provide underlying storage of the JobHistory.
    MYSQL_HOST=
    MYSQL_PORT=
    MYSQL_DB=
    MYSQL_USER=
    MYSQL_PASSWORD=
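
    A hypothetical filled-in example (all values below are placeholders, not defaults):

    MYSQL_HOST=127.0.0.1
    MYSQL_PORT=3306
    MYSQL_DB=linkis
    MYSQL_USER=linkis
    MYSQL_PASSWORD=******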

    3. Installation and Startup#

    1. Execute the installation script:#

    sh bin/install.sh

    2. Installation steps#

    • The install.sh script will ask you whether to initialize the database and import the metadata.

    A user might run the install.sh script repeatedly and accidentally clear all data in the databases. Therefore, each time install.sh is executed, the user is asked whether to initialize the database and import the metadata.

    Please select yes on the first installation.

    Please note: If you are upgrading the existing environment of Linkis from 0.X to 1.0, please do not choose yes directly, refer to Linkis1.0 Upgrade Guide first.

    3. Check whether the installation was successful#

    You can check whether the installation is successful or not by viewing the logs printed on the console.

    If there is an error message, check the specific reason for that error or refer to FAQ for help.

    4. Add mysql driver package#

    Note

    Because the mysql-connector-java driver is under the GPL2.0 license and does not meet the license policy of the Apache open source agreement, starting from version 1.0.3 the official Apache release does not include the mysql-connector-java-x.x.x.jar dependency by default. You need to add the dependency to the corresponding lib directories during installation and deployment.

    Download the mysql driver, taking version 5.1.49 as an example: https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar

    Copy the mysql driver package to the lib directories:

    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar {LINKIS_HOME}/lib/linkis-commons/public-module/
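
    A minimal sketch of the whole step, assuming LINKIS_HOME is set and the host can reach Maven Central:

    wget https://repo1.maven.org/maven2/mysql/mysql-connector-java/5.1.49/mysql-connector-java-5.1.49.jar
    cp mysql-connector-java-5.1.49.jar ${LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/
    cp mysql-connector-java-5.1.49.jar ${LINKIS_HOME}/lib/linkis-commons/public-module/
    # quick check that the jar landed in both places
    ls ${LINKIS_HOME}/lib/linkis-spring-cloud-services/linkis-mg-gateway/ | grep mysql-connector
    ls ${LINKIS_HOME}/lib/linkis-commons/public-module/ | grep mysql-connector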

    5. Linkis quick startup#

    Notice that if you use DSS or other projects that rely on Linkis version < 1.1.1, you also need to modify the ${LINKIS_HOME}/conf/linkis.properties file:

    echo "wds.linkis.session.ticket.key=bdp-user-ticket-id" >> linkis.properties

    (1). Start services

    Run the following commands on the installation directory to start all services.

    sh sbin/linkis-start-all.sh

    (2). Check if start successfully

    You can check the startup status of the services on Eureka. Here is how:

    Open http://${EUREKA_INSTALL_IP}:${EUREKA_PORT} on the browser and check if services have registered successfully.

    If you have not specified EUREKA_INSTALL_IP and EUREKA_PORT in config.sh, the HTTP address is http://127.0.0.1:20303
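
    Besides the browser, a hedged command-line check against Eureka's standard REST endpoint (address assumed to be the default above):

    curl -s http://127.0.0.1:20303/eureka/apps | grep -i linkis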

    As shown in the figure below, if all of the following microservices are registered in Eureka, it means they have started successfully and are able to work.

    Linkis1.0_Eureka

    - + \ No newline at end of file diff --git a/docs/latest/deployment/sourcecode_hierarchical_structure/index.html b/docs/latest/deployment/sourcecode_hierarchical_structure/index.html index f18e58e94fe..eda19759a22 100644 --- a/docs/latest/deployment/sourcecode_hierarchical_structure/index.html +++ b/docs/latest/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ Source Code Directory Structure | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Source Code Directory Structure

    A description of the hierarchical directory structure of the Linkis source code. If you want to learn more about the Linkis modules, please check the related Linkis architecture design documents.

    |-- assembly-combined-package // module that compiles the entire project
    |        |-- assembly-combined
    |        |-- bin
    |        |-- deploy-config
    |        |-- src
    |-- linkis-commons // core abstraction, contains all common modules
    |        |-- linkis-common // common module, built-in many common tools
    |        |-- linkis-hadoop-common
    |        |-- linkis-httpclient // Java SDK top-level interface
    |        |-- linkis-message-scheduler
    |        |-- linkis-module
    |        |-- linkis-mybatis // SpringCloud's Mybatis module
    |        |-- linkis-protocol
    |        |-- linkis-rpc // RPC module, complex two-way communication based on Feign
    |        |-- linkis-scheduler // general scheduling module
    |        |-- linkis-storage
    |
    |-- linkis-computation-governance // computation governance services
    |        |-- linkis-client // Java SDK, users can directly access Linkis through Client
    |        |-- linkis-computation-governance-common
    |        |-- linkis-engineconn
    |        |-- linkis-engineconn-manager
    |        |-- linkis-entrance // general low-level entrance module
    |        |-- linkis-entrance-client
    |        |-- linkis-jdbc-driver
    |        |-- linkis-manager
    |
    |-- linkis-engineconn-plugins
    |        |-- engineconn-plugins
    |        |-- linkis-engineconn-plugin-framework
    |
    |-- linkis-extensions
    |        |-- linkis-io-file-client
    |-- linkis-orchestrator
    |        |-- linkis-code-orchestrator
    |        |-- linkis-computation-orchestrator
    |        |-- linkis-orchestrator-core
    |        |-- plugin
    |-- linkis-public-enhancements // public enhancement services
    |        |-- linkis-bml // material library
    |        |-- linkis-context-service // unified context
    |        |-- linkis-datasource // data source service
    |        |-- linkis-publicservice // public service
    |-- linkis-spring-cloud-services // microservice governance
    |        |-- linkis-service-discovery
    |        |-- linkis-service-gateway // gateway
    |-- db // database information
    |-- license-doc // license details
    |        |-- license // the license of the backend project
    |        |-- ui-license // license of the linkis management console
    |-- tool // tool scripts
    |        |-- check.sh
    |        |-- dependencies
    |
    |-- web // management console code of linkis
    |
    |-- scalastyle-config.xml // Scala code format check configuration file
    |-- CONTRIBUTING.md
    |-- CONTRIBUTING_CN.md
    |-- DISCLAIMER-WIP
    |-- LICENSE // LICENSE of the project source code
    |-- LICENSE-binary // LICENSE of the binary package
    |-- LICENSE-binary-ui // LICENSE of the front-end compiled package
    |-- NOTICE // NOTICE of the project source code
    |-- NOTICE-binary // NOTICE of the binary package
    |-- NOTICE-binary-ui // NOTICE of the front-end binary package
    |-- licenses-binary // detailed licenses of the binary package's dependencies
    |-- licenses-binary-ui // detailed licenses of the front-end package's dependencies
    |-- README.md
    |-- README_CN.md
    - + \ No newline at end of file diff --git a/docs/latest/deployment/start_metadatasource/index.html b/docs/latest/deployment/start_metadatasource/index.html index 445247d76cf..d0dfca0496a 100644 --- a/docs/latest/deployment/start_metadatasource/index.html +++ b/docs/latest/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ DataSource | Apache Linkis - + @@ -71,7 +71,7 @@ }}
    - + \ No newline at end of file diff --git a/docs/latest/deployment/unpack_hierarchical_structure/index.html b/docs/latest/deployment/unpack_hierarchical_structure/index.html index d022fc941b2..80aad550f1e 100644 --- a/docs/latest/deployment/unpack_hierarchical_structure/index.html +++ b/docs/latest/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ installation package directory structure | Apache Linkis - + @@ -17,7 +17,7 @@
    - + \ No newline at end of file diff --git a/docs/latest/deployment/web_install/index.html b/docs/latest/deployment/web_install/index.html index f1135e2386e..79f82ddc925 100644 --- a/docs/latest/deployment/web_install/index.html +++ b/docs/latest/deployment/web_install/index.html @@ -7,7 +7,7 @@ Linkis Console Deployment | Apache Linkis - + @@ -21,7 +21,7 @@
    1. Copy the front-end package to the corresponding directory: /appcom/Install/linkis/dist; # The directory where the front-end package is decompressed

    2. Start the service: sudo systemctl restart nginx

    3. After execution, you can access it directly in the browser: http://nginx_ip:nginx_port

    3. Common problems#

    (1) Upload file size limit

    sudo vi /etc/nginx/nginx.conf

    Change upload size

    client_max_body_size 200m

    (2) Interface timeout

    sudo vi /etc/nginx/conf.d/linkis.conf

    Change interface timeout

    proxy_read_timeout 600s
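
    A short recap of where the two directives live (file paths as above) and how to apply them; a sketch, not a complete configuration:

    # /etc/nginx/nginx.conf          -> http block:     client_max_body_size 200m;
    # /etc/nginx/conf.d/linkis.conf  -> location block: proxy_read_timeout 600s;
    sudo nginx -t          # validate the configuration first
    sudo nginx -s reload   # then reload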
    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_compile_and_package/index.html b/docs/latest/development/linkis_compile_and_package/index.html index 996eeed36f8..86d8e0220d2 100644 --- a/docs/latest/development/linkis_compile_and_package/index.html +++ b/docs/latest/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Compile And Package | Apache Linkis - + @@ -20,7 +20,7 @@ Modify the dependency hadoop-hdfs to hadoop-hdfs-client:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs</artifactId> <!-- Just replace this line with <artifactId>hadoop-hdfs-client</artifactId> -->
        <version>${hadoop.version}</version>
    </dependency>

    Modify hadoop-hdfs to:

    <dependency>
        <groupId>org.apache.hadoop</groupId>
        <artifactId>hadoop-hdfs-client</artifactId>
        <version>${hadoop.version}</version>
    </dependency>

    5.2 How to modify the Spark and Hive versions that Linkis depends on#

    Here's an example of changing the version of Spark. Go to the directory where the Spark engine is located and manually modify the Spark version information of the pom.xml file as follows:

    cd incubator-linkis-x.x.x/linkis-engineconn-plugins/engineconn-plugins/spark
    vim pom.xml

    <properties>
        <spark.version>2.4.3</spark.version> <!-- Modify the Spark version number here -->
    </properties>

    Modifying the version of other engines is similar to modifying the Spark version. First, enter the directory where the relevant engine is located, and manually modify the engine version information in the pom.xml file.

    Then please refer to 4. Compile an engine

    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_config/index.html b/docs/latest/development/linkis_config/index.html index bce85622fd8..e12f6319937 100644 --- a/docs/latest/development/linkis_config/index.html +++ b/docs/latest/development/linkis_config/index.html @@ -7,7 +7,7 @@ Introduction to Linkis Configuration Parameters | Apache Linkis - + @@ -27,7 +27,7 @@ It mainly specifies the startup parameters and runtime parameters of the engine. These parameters can be set on the client side. It is recommended to use the client side for personalized submission settings. Only the default values ​​are set on the page.

    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_debug/index.html b/docs/latest/development/linkis_debug/index.html index 0296e625417..1cceffa9161 100644 --- a/docs/latest/development/linkis_debug/index.html +++ b/docs/latest/development/linkis_debug/index.html @@ -7,7 +7,7 @@ Linkis Debug | Apache Linkis - + @@ -49,7 +49,7 @@ y

    - + \ No newline at end of file diff --git a/docs/latest/development/linkis_debug_in_mac/index.html b/docs/latest/development/linkis_debug_in_mac/index.html index e4cdc8cf50c..e26a608d569 100644 --- a/docs/latest/development/linkis_debug_in_mac/index.html +++ b/docs/latest/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ Linkis Debug In Mac | Apache Linkis - + @@ -51,7 +51,7 @@ wds.linkis.engineconn.plugin.loader.store.path=/Users/leojie/other_project/apache/linkis/incubator-linkis/linkis-engineconn-plugins/shell/target/out

    These two configurations mainly specify the root directory for engine storage. Pointing them at target/out means that after engine-related code or configuration changes, the engineplugin service only needs to be restarted for the changes to take effect.
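
    In a normally deployed environment the corresponding restart is a one-liner (for the IDEA setup described in this section, restarting the engineplugin service from the IDE achieves the same thing):

    sh ${LINKIS_HOME}/sbin/linkis-daemon.sh restart cg-engineplugin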

    3.12 Set sudo password-free for the current user#

    When the engine is started, sudo needs to be used to execute the shell command to start the engine process. The current user on the mac generally needs to enter a password when using sudo. Therefore, it is necessary to set sudo password-free for the current user. The setting method is as follows:

    sudo chmod u-w /etc/sudoers
    sudo visudo
    # Replace  #%admin ALL=(ALL) ALL  with  %admin ALL=(ALL) NOPASSWD: ALL
    # Save the file and exit

    3.13 Service Testing#

    Make sure that all of the above services have started successfully, then test by submitting a shell script job in Postman.

    First visit the login interface to generate a cookie:

    login

    Then submit the shell code for execution

    POST: http://127.0.0.1:9001/api/rest_j/v1/entrance/submit

    body parameter:

    {  "executionContent": {    "code": "echo 'hello'",    "runType": "shell"  },  "params": {    "variable": {      "testvar": "hello"    },    "configuration": {      "runtime": {},      "startup": {}    }  },  "source": {    "scriptPath": "file:///tmp/hadoop/test.sql"  },  "labels": {    "engineType": "shell-1",    "userCreator": "leojie-IDE"  }}

    Execution result:

    {    "method": "/api/entrance/submit",    "status": 0,    "message": "OK",    "data": {        "taskID": 1,        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    Finally, check the running status of the task and get the running result set:

    GET http://127.0.0.1:9001/api/rest_j/v1/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress

    {    "method": "/api/entrance/exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0/progress",    "status": 0,    "message": "OK",    "data": {        "progress": 1,        "progressInfo": [],        "execID": "exec_id018017linkis-cg-entrance192.168.3.13:9104IDE_leojie_shell_0"    }}

    GET http://127.0.0.1:9001/api/rest_j/v1/jobhistory/1/get

    GET http://127.0.0.1:9001/api/rest_j/v1/filesystem/openFile?path=file:///Users/leojie/software/linkis/data/resultSetDir/leojie/linkis/2022-07-16/214859/IDE/1/1_0.dolphin

    {    "method": "/api/filesystem/openFile",    "status": 0,    "message": "OK",    "data": {        "metadata": "NULL",        "totalPage": 0,        "totalLine": 1,        "page": 1,        "type": "1",        "fileContent": [            [                "hello"            ]        ]    }}
    - + \ No newline at end of file diff --git a/docs/latest/development/new_engine_conn/index.html b/docs/latest/development/new_engine_conn/index.html index d3ee3cafcbe..b3856f3a5be 100644 --- a/docs/latest/development/new_engine_conn/index.html +++ b/docs/latest/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ How To Quickly Implement A New Engine | Apache Linkis - + @@ -52,7 +52,7 @@ const NODEICON = { [NODETYPE.JDBC]: { icon: jdbc, class: {'jdbc': true} },}

    Add the icon of the new engine in the web/src/apps/workflows/module/process/images/newIcon/ directory

    web/src/apps/workflows/module/process/images/newIcon/jdbc

    Also, when contributing to the community, please consider the license and copyright of the svg file.

    3. Chapter Summary#

    The above content records the implementation process of a new engine, as well as some additional engine configuration that needs to be done. At present, the process of adding a new engine is still relatively cumbersome; we hope to optimize the expansion and installation of new engines in subsequent versions.

    - + \ No newline at end of file diff --git a/docs/latest/development/web_build/index.html b/docs/latest/development/web_build/index.html index 32b01c75b74..d5830dd9b3e 100644 --- a/docs/latest/development/web_build/index.html +++ b/docs/latest/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis Console Compile | Apache Linkis - + @@ -17,7 +17,7 @@ When you run the project in this way, the effect of your code changes will be dynamically reflected in the browser.

    Note: Because the project is developed separately from the front and back ends, when running on a local browser, the browser needs to be set to cross domains to access the back-end interface. For specific setting, please refer to solve the chrome cross domain problem.

    6. Common problem#

    6.1 npm install cannot succeed#

    If you encounter this situation, you can use the domestic Taobao npm mirror:

    npm install -g cnpm --registry=https://registry.npm.taobao.org

    Then, execute the following command instead of npm install:

    cnpm install

    Note that you can still use the npm run build and npm run serve commands to package and start the project.
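
    If you prefer not to install cnpm, an alternative sketch is to point npm itself at the mirror (the Taobao registry has also been served at registry.npmmirror.com in recent years):

    npm config set registry https://registry.npm.taobao.org
    npm install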

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/flink/index.html b/docs/latest/engine_usage/flink/index.html index a52f803d6c6..ffadb4a2048 100644 --- a/docs/latest/engine_usage/flink/index.html +++ b/docs/latest/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ EngineConnPlugin Installation

    2.3 Flink engine tags#

    Linkis 1.0 selects and manages engines through tags, so we need to insert tag data into the database; the way to insert it is shown below.

    EngineConnPlugin Installation > 2.2 Configuration modification of management console (optional)

    3. The use of Flink engine#

    Preparation: queue setting#

    The Flink engine of Linkis 1.0 is started by flink on yarn, so you need to specify the queue used by the user. The way to specify the queue is shown in Figure 3-1.

    Figure 3-1 Queue settings

    Prerequisite knowledge: two ways to use the Flink engine#

    Linkis' Flink engine has two execution modes. One is the ComputationEngineConn mode, mainly used in DSS-Scriptis or Streamis-Datasource for debugging, sampling, and verifying the correctness of flink code; the other is the OnceEngineConn mode, mainly used in the Streamis production center to start a streaming application.

    Prerequisite knowledge: Connector plug-ins for FlinkSQL#

    FlinkSQL supports a variety of data sources, such as binlog, kafka, hive, etc. If you want to use these data sources in Flink code, you need to put the connector plug-in jar packages into the lib of the flink engine and restart the Linkis EnginePlugin service. For example, to use binlog as a data source in your FlinkSQL, put flink-connector-mysql-cdc-1.1.1.jar into the lib of the flink engine:

    cd ${LINKIS_HOME}/sbin
    sh linkis-daemon.sh restart cg-engineplugin

    3.1 ComputationEngineConn method#

    In order to facilitate sampling and debugging, we have added a script type of fql to Scriptis, which is specifically used to execute FlinkSQL. But you need to ensure that your DSS has been upgraded to DSS1.0.0. After upgrading to DSS1.0.0, you can directly enter Scriptis and create a new fql script for editing and execution.

    A FlinkSQL writing example, using binlog as the data source:

    CREATE TABLE mysql_binlog (
      id INT NOT NULL,
      name STRING,
      age INT
    ) WITH (
      'connector' = 'mysql-cdc',
      'hostname' = 'ip',
      'port' = 'port',
      'username' = 'username',
      'password' = 'password',
      'database-name' = 'dbname',
      'table-name' = 'tablename',
      'debezium.snapshot.locking.mode' = 'none'  -- recommended to add, otherwise the table will be locked
    );

    select * from mysql_binlog where id > 10;

    When debugging with select syntax in Scriptis, the Flink engine has an automatic cancel mechanism: when the specified time is reached or the number of sampled rows reaches the specified limit, the Flink engine actively cancels the task and persists the result set obtained so far; the front end then calls the open-result-set interface to display it.

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, a cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. Flink usage is as follows:

    sh ./bin/linkis-cli -engineType flink-1.12.2 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    3.3 OnceEngineConn method#

    The OnceEngineConn mode is used to formally launch Flink streaming applications. It calls LinkisManager's createEngineConn interface through LinkisManagerClient and sends the code to the created Flink engine, which then starts executing. Other systems, such as Streamis, can make this call. Using the Client is simple: first create a new maven project, or introduce the following dependency into your project:

    <dependency>
        <groupId>com.webank.wedatasphere.linkis</groupId>
        <artifactId>linkis-computation-client</artifactId>
        <version>${linkis.version}</version>
    </dependency>

    Then create a new scala test file and click Execute; it completes the analysis of one binlog data source and inserts the data into another mysql database table. Note that you must create a resources directory in the maven project and place a linkis.properties file in it, specifying the gateway address and api version of linkis, such as:

    wds.linkis.server.version=v1
    wds.linkis.gateway.url=http://ip:9001/
    object OnceJobTest {
      def main(args: Array[String]): Unit = {
        val sql = """CREATE TABLE mysql_binlog (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT
                    |) WITH (
                    | 'connector' = 'mysql-cdc',
                    | 'hostname' = 'ip',
                    | 'port' = 'port',
                    | 'username' = '${username}',
                    | 'password' = '${password}',
                    | 'database-name' = '${database}',
                    | 'table-name' = '${tablename}',
                    | 'debezium.snapshot.locking.mode' = 'none'
                    |);
                    |CREATE TABLE sink_table (
                    | id INT NOT NULL,
                    | name STRING,
                    | age INT,
                    | primary key(id) not enforced
                    |) WITH (
                    |  'connector' = 'jdbc',
                    |  'url' = 'jdbc:mysql://${ip}:port/${database}',
                    |  'table-name' = '${tablename}',
                    |  'driver' = 'com.mysql.jdbc.Driver',
                    |  'username' = '${username}',
                    |  'password' = '${password}'
                    |);
                    |INSERT INTO sink_table SELECT id, name, age FROM mysql_binlog;
                    |""".stripMargin
        val onceJob = SimpleOnceJob.builder().setCreateService("Flink-Test")
          .addLabel(LabelKeyUtils.ENGINE_TYPE_LABEL_KEY, "flink-1.12.2")
          .addLabel(LabelKeyUtils.USER_CREATOR_LABEL_KEY, "hadoop-Streamis")
          .addLabel(LabelKeyUtils.ENGINE_CONN_MODE_LABEL_KEY, "once")
          .addStartupParam(Configuration.IS_TEST_MODE.key, true)
          // .addStartupParam("label." + LabelKeyConstant.CODE_TYPE_KEY, "sql")
          .setMaxSubmitTime(300000)
          .addExecuteUser("hadoop")
          .addJobContent("runType", "sql")
          .addJobContent("code", sql)
          .addSource("jobName", "OnceJobTest")
          .build()
        onceJob.submit()
        println(onceJob.getId)
        onceJob.waitForCompleted()
        System.exit(0)
      }
    }
    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/hive/index.html b/docs/latest/engine_usage/hive/index.html index 395a18dfbab..b7192df75b8 100644 --- a/docs/latest/engine_usage/hive/index.html +++ b/docs/latest/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive Engine Usage | Apache Linkis - + @@ -26,7 +26,7 @@ </loggers></configuration>
    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/jdbc/index.html b/docs/latest/engine_usage/jdbc/index.html index d53834fd38b..7eab81928b2 100644 --- a/docs/latest/engine_usage/jdbc/index.html +++ b/docs/latest/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "jdbc-4"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "jdbc"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of JDBC is as follows:

    sh ./bin/linkis-cli -engineType jdbc-4 -codeType jdbc -code "show tables"  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    The way to use Scriptis is the simplest. You can go directly to Scriptis, right-click the directory and create a new JDBC script, write JDBC code and click Execute.

    The execution principle of JDBC is to load the JDBC Driver, submit the sql to the SQL server for execution, obtain the result set, and return it.

    Figure 3-2 Screenshot of the execution effect of JDBC

    4. JDBC EngineConn user settings#

    JDBC user settings are mainly the JDBC connection information; it is recommended that users encrypt and manage the password and other sensitive information.
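
    As an illustration only, the connection settings usually look like the following properties (the parameter names below are assumptions taken from the JDBC engine documentation; verify them against your Linkis version):

    # assumption: connection URL parameter name per the JDBC engine docs
    wds.linkis.jdbc.connect.url=jdbc:mysql://127.0.0.1:3306/test
    # assumption: credential parameters; prefer encrypted management for the password
    wds.linkis.jdbc.username=test
    wds.linkis.jdbc.password=******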

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/openlookeng/index.html b/docs/latest/engine_usage/openlookeng/index.html index 8b51eac71a1..4981390ae51 100644 --- a/docs/latest/engine_usage/openlookeng/index.html +++ b/docs/latest/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLookEng Engine | Apache Linkis - + @@ -19,7 +19,7 @@ For the openlookeng task, you only need to modify the EngineConnType and CodeType parameters in the Demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "openlookeng-1.5.0"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType

    3.2 Task submission via Linkis-cli#

    After Linkis 1.0, the cli method is provided to submit tasks. We only need to specify the corresponding EngineConn and CodeType tag types. The use of openlookeng is as follows:

    sh ./bin/linkis-cli -engineType openlookeng-1.5.0 -codeType sql -code 'show databases;' -submitUser hadoop -proxyUser hadoop

    For specific usage, please refer to: Linkis CLI Manual.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/overview/index.html b/docs/latest/engine_usage/overview/index.html index a67430f28cf..2c7769d4113 100644 --- a/docs/latest/engine_usage/overview/index.html +++ b/docs/latest/engine_usage/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -16,7 +16,7 @@         The engine is a component that provides users with data processing and analysis capabilities. Currently, it has been connected to Linkis's engine, including mainstream big data computing engines Spark, Hive, Presto, etc. , There are also engines with the ability to process data in scripts such as python and Shell. DataSphereStudio is a one-stop data operation platform docked with Linkis. Users can conveniently use the engine supported by Linkis in DataSphereStudio to complete interactive data analysis tasks and workflow tasks.

    Engine | Whether to support Scriptis | Whether to support workflow
    Spark | Support | Support
    Hive | Support | Support
    Presto | Support | Support
    ElasticSearch | Support | Support
    Python | Support | Support
    Shell | Support | Support
    JDBC | Support | Support
    MySQL | Support | Support
    Flink | Support | Support

    2. Document structure#

    You can refer to the following documents for the related documents of the engines that have been accessed.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/pipeline/index.html b/docs/latest/engine_usage/pipeline/index.html index ba5745af30b..e557d06fdda 100644 --- a/docs/latest/engine_usage/pipeline/index.html +++ b/docs/latest/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ pipeline engine | Apache Linkis - + @@ -20,7 +20,7 @@

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/python/index.html b/docs/latest/engine_usage/python/index.html index 59a6cb19558..3d4faef3ed3 100644 --- a/docs/latest/engine_usage/python/index.html +++ b/docs/latest/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Gateway, and then the Python EngineConn submits the code to the python executor for execution.

    Figure 3-1 Screenshot of the execution effect of python

    4. Python EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the version of python and some modules that python needs to load.

    Figure 4-1 User-defined configuration management console of python

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/shell/index.html b/docs/latest/engine_usage/shell/index.html index 07285f56cbf..6568c3797a1 100644 --- a/docs/latest/engine_usage/shell/index.html +++ b/docs/latest/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell Engine Usage | Apache Linkis - + @@ -16,7 +16,7 @@ If you use Hive, you only need to make the following changes:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "shell-1"); // required engineType Label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "shell"); // required codeType

    3.2 How to use Linkis-cli#

    After Linkis 1.0, you can submit tasks through cli. We only need to specify the corresponding EngineConn and CodeType tag types. The use of shell is as follows:

    sh ./bin/linkis-cli -engineType shell-1 -codeType shell -code "echo \"hello\" "  -submitUser hadoop -proxyUser hadoop

    The specific usage can refer to Linkis CLI Manual.

    3.3 How to use Scriptis#

    The use of Scriptis is the simplest. You can directly enter Scriptis, right-click the directory and create a new shell script, write shell code and click Execute.

    The execution principle of the shell engine is that the shell EngineConn starts a system process through Java's built-in ProcessBuilder, redirects the process's output to the EngineConn, and writes it to the log.

    Figure 3-1 Screenshot of shell execution effect

    4. Shell EngineConn user settings#

    The shell EngineConn can generally set the maximum memory of the EngineConn JVM.

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/spark/index.html b/docs/latest/engine_usage/spark/index.html index e988f56cd63..fcac88b49db 100644 --- a/docs/latest/engine_usage/spark/index.html +++ b/docs/latest/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark Engine Usage | Apache Linkis - + @@ -18,7 +18,7 @@ Figure 3-4 pyspark execution mode

    4. Spark EngineConn user settings#

    In addition to the above EngineConn configuration, users can also make custom settings, such as the number of spark session executors and the memory of the executors. These parameters are for users to set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version of pyspark.

    Figure 4-1 Spark user-defined configuration management console

    - + \ No newline at end of file diff --git a/docs/latest/engine_usage/sqoop/index.html b/docs/latest/engine_usage/sqoop/index.html index 06e6cbb98fd..6e1e05906a9 100644 --- a/docs/latest/engine_usage/sqoop/index.html +++ b/docs/latest/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop Engine | Apache Linkis - + @@ -25,7 +25,7 @@ def exportJob(jobBuilder: SimpleOnceJobBuilder): SubmittableSimpleOnceJob = { jobBuilder .addJobContent("sqoop.env.mapreduce.job.queuename", "queue1") .addJobContent("sqoop.mode", "import") .addJobContent("sqoop.args.connect", "jdbc:mysql://127.0.0.1:3306/exchangis") .addJobContent("sqoop.args.query", "select id as order, sno as great_time from" + " exchangis_table where sno =1 and $CONDITIONS") .addJobContent("sqoop.args.hcatalog.database", "hadoop") .addJobContent("sqoop.args.hcatalog.table", "partition_33") .addJobContent("sqoop.args.hcatalog.partition.keys", "month") .addJobContent("sqoop.args.hcatalog.partition.values", "4") .addJobContent("sqoop.args.num.mappers", "1") .build() }

    Parameter comparison table (against native Sqoop parameters):

    sqoop.env.mapreduce.job.queuename<=>-Dmapreduce.job.queuename
    sqoop.args.connection.manager<===>--connection-manager
    sqoop.args.connection.param.file<===>--connection-param-file
    sqoop.args.driver<===>--driver
    sqoop.args.hadoop.home<===>--hadoop-home
    sqoop.args.hadoop.mapred.home<===>--hadoop-mapred-home
    sqoop.args.help<===>help
    sqoop.args.password<===>--password
    sqoop.args.password.alias<===>--password-alias
    sqoop.args.password.file<===>--password-file
    sqoop.args.relaxed.isolation<===>--relaxed-isolation
    sqoop.args.skip.dist.cache<===>--skip-dist-cache
    sqoop.args.username<===>--username
    sqoop.args.verbose<===>--verbose
    sqoop.args.append<===>--append
    sqoop.args.as.avrodatafile<===>--as-avrodatafile
    sqoop.args.as.parquetfile<===>--as-parquetfile
    sqoop.args.as.sequencefile<===>--as-sequencefile
    sqoop.args.as.textfile<===>--as-textfile
    sqoop.args.autoreset.to.one.mapper<===>--autoreset-to-one-mapper
    sqoop.args.boundary.query<===>--boundary-query
    sqoop.args.case.insensitive<===>--case-insensitive
    sqoop.args.columns<===>--columns
    sqoop.args.compression.codec<===>--compression-codec
    sqoop.args.delete.target.dir<===>--delete-target-dir
    sqoop.args.direct<===>--direct
    sqoop.args.direct.split.size<===>--direct-split-size
    sqoop.args.query<===>--query
    sqoop.args.fetch.size<===>--fetch-size
    sqoop.args.inline.lob.limit<===>--inline-lob-limit
    sqoop.args.num.mappers<===>--num-mappers
    sqoop.args.mapreduce.job.name<===>--mapreduce-job-name
    sqoop.args.merge.key<===>--merge-key
    sqoop.args.split.by<===>--split-by
    sqoop.args.table<===>--table
    sqoop.args.target.dir<===>--target-dir
    sqoop.args.validate<===>--validate
    sqoop.args.validation.failurehandler<===>--validation-failurehandler
    sqoop.args.validation.threshold<===>--validation-threshold
    sqoop.args.validator<===>--validator
    sqoop.args.warehouse.dir<===>--warehouse-dir
    sqoop.args.where<===>--where
    sqoop.args.compress<===>--compress
    sqoop.args.check.column<===>--check-column
    sqoop.args.incremental<===>--incremental
    sqoop.args.last.value<===>--last-value
    sqoop.args.enclosed.by<===>--enclosed-by
    sqoop.args.escaped.by<===>--escaped-by
    sqoop.args.fields.terminated.by<===>--fields-terminated-by
    sqoop.args.lines.terminated.by<===>--lines-terminated-by
    sqoop.args.mysql.delimiters<===>--mysql-delimiters
    sqoop.args.optionally.enclosed.by<===>--optionally-enclosed-by
    sqoop.args.input.enclosed.by<===>--input-enclosed-by
    sqoop.args.input.escaped.by<===>--input-escaped-by
    sqoop.args.input.fields.terminated.by<===>--input-fields-terminated-by
    sqoop.args.input.lines.terminated.by<===>--input-lines-terminated-by
    sqoop.args.input.optionally.enclosed.by<===>--input-optionally-enclosed-by
    sqoop.args.create.hive.table<===>--create-hive-table
    sqoop.args.hive.delims.replacement<===>--hive-delims-replacement
    sqoop.args.hive.database<===>--hive-database
    sqoop.args.hive.drop.import.delims<===>--hive-drop-import-delims
    sqoop.args.hive.home<===>--hive-home
    sqoop.args.hive.import<===>--hive-import
    sqoop.args.hive.overwrite<===>--hive-overwrite
    sqoop.args.hive.partition.value<===>--hive-partition-value
    sqoop.args.hive.table<===>--hive-table
    sqoop.args.column.family<===>--column-family
    sqoop.args.hbase.bulkload<===>--hbase-bulkload
    sqoop.args.hbase.create.table<===>--hbase-create-table
    sqoop.args.hbase.row.key<===>--hbase-row-key
    sqoop.args.hbase.table<===>--hbase-table
    sqoop.args.hcatalog.database<===>--hcatalog-database
    sqoop.args.hcatalog.home<===>--hcatalog-home
    sqoop.args.hcatalog.partition.keys<===>--hcatalog-partition-keys
    sqoop.args.hcatalog.partition.values<===>--hcatalog-partition-values
    sqoop.args.hcatalog.table<===>--hcatalog-table
    sqoop.args.hive.partition.key<===>--hive-partition-key
    sqoop.args.map.column.hive<===>--map-column-hive
    sqoop.args.create.hcatalog.table<===>--create-hcatalog-table
    sqoop.args.hcatalog.storage.stanza<===>--hcatalog-storage-stanza
    sqoop.args.accumulo.batch.size<===>--accumulo-batch-size
    sqoop.args.accumulo.column.family<===>--accumulo-column-family
    sqoop.args.accumulo.create.table<===>--accumulo-create-table
    sqoop.args.accumulo.instance<===>--accumulo-instance
    sqoop.args.accumulo.max.latency<===>--accumulo-max-latency
    sqoop.args.accumulo.password<===>--accumulo-password
    sqoop.args.accumulo.row.key<===>--accumulo-row-key
    sqoop.args.accumulo.table<===>--accumulo-table
    sqoop.args.accumulo.user<===>--accumulo-user
    sqoop.args.accumulo.visibility<===>--accumulo-visibility
    sqoop.args.accumulo.zookeepers<===>--accumulo-zookeepers
    sqoop.args.bindir<===>--bindir
    sqoop.args.class.name<===>--class-name
    sqoop.args.input.null.non.string<===>--input-null-non-string
    sqoop.args.input.null.string<===>--input-null-string
    sqoop.args.jar.file<===>--jar-file
    sqoop.args.map.column.java<===>--map-column-java
    sqoop.args.null.non.string<===>--null-non-string
    sqoop.args.null.string<===>--null-string
    sqoop.args.outdir<===>--outdir
    sqoop.args.package.name<===>--package-name
    sqoop.args.conf<===>-conf
    sqoop.args.D<===>-D
    sqoop.args.fs<===>-fs
    sqoop.args.jt<===>-jt
    sqoop.args.files<===>-files
    sqoop.args.libjars<===>-libjars
    sqoop.args.archives<===>-archives
    sqoop.args.update.key<===>--update-key
    sqoop.args.update.mode<===>--update-mode
    sqoop.args.export.dir<===>--export-dir
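
    Applying the mapping above to the exportJob example earlier, the equivalent native sqoop invocation would look roughly like this sketch (the values are the hypothetical ones from that example):

    sqoop import -Dmapreduce.job.queuename=queue1 \
      --connect 'jdbc:mysql://127.0.0.1:3306/exchangis' \
      --query 'select id as order, sno as great_time from exchangis_table where sno = 1 and $CONDITIONS' \
      --hcatalog-database hadoop \
      --hcatalog-table partition_33 \
      --hcatalog-partition-keys month \
      --hcatalog-partition-values 4 \
      --num-mappers 1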
    - + \ No newline at end of file diff --git a/docs/latest/introduction/index.html b/docs/latest/introduction/index.html index e312603005f..d25ecd8a8aa 100644 --- a/docs/latest/introduction/index.html +++ b/docs/latest/introduction/index.html @@ -7,7 +7,7 @@ Introduction | Apache Linkis - + @@ -20,7 +20,7 @@ Since the first release of Linkis in 2019, it has accumulated more than 700 trial companies and 1000+ sandbox trial users, which involving diverse industries, from finance, banking, tele-communication, to manufactory, internet companies and so on.

    - + \ No newline at end of file diff --git a/docs/latest/release/index.html b/docs/latest/release/index.html index 8eee0df0554..894eeba237e 100644 --- a/docs/latest/release/index.html +++ b/docs/latest/release/index.html @@ -7,7 +7,7 @@ Version Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Version Overview

    Configuration Item#

    Module Name (Service Name) | Type | Parameter Name | Default Value | Description
    common | New | linkis.codeType.runType.relation | sql=>sql|hql|jdbc|hive|psql|fql, python=>python|py|pyspark, java=>java, scala=>scala, shell=>sh|shell | mapping relationship between codeType and runType
    rpc | New | linkis.rpc.spring.params.enable | false | Controls the ribbon mode parameter switch of the RPC module
    ec | New | linkis.engineconn.max.parallelism | 300 | Asynchronous execution supports setting the number of concurrent job groups
    ec | New | linkis.engineconn.async.group.max.running | 10 |
    ec-flink | New | linkis.flink.execution.attached | true |
    ec-flink | New | linkis.flink.kerberos.enable | false |
    ec-flink | New | linkis.flink.kerberos.login.contexts | Client,KafkaClient |
    ec-flink | New | linkis.flink.kerberos.login.keytab | |
    ec-flink | New | linkis.flink.kerberos.login.principal | |
    ec-flink | New | linkis.flink.kerberos.krb5-conf.path | |
    ec-flink | New | linkis.flink.params.placeholder.blank | \0x001 |
    ec-sqoop | New | sqoop.task.map.memory | 2 |
    ec-sqoop | New | sqoop.task.map.cpu.cores | 1 |
    ec-sqoop | New | sqoop.params.name.mode | sqoop.mode |
    ec-sqoop | New | sqoop.params.name.prefix | sqoop.args. |
    ec-sqoop | New | sqoop.params.name.env.prefix | sqoop.env. |
    ec-sqoop | New | linkis.hadoop.site.xml | /etc/hadoop/conf/core-site.xml;/etc/hadoop/conf/hdfs-site.xml;/etc/hadoop/conf/yarn-site.xml;/etc/hadoop/conf/mapred-site.xml | sets where sqoop loads the hadoop parameter files from
    ec-sqoop | New | sqoop.fetch.status.interval | 5s | Sets the interval for fetching sqoop execution status

    DB Table Changes#

    no change

    - + \ No newline at end of file diff --git a/docs/latest/table/udf-table/index.html b/docs/latest/table/udf-table/index.html index 0c280ede009..3ac926ff292 100644 --- a/docs/latest/table/udf-table/index.html +++ b/docs/latest/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF table structure | Apache Linkis - + @@ -16,7 +16,7 @@ udf_type 3: custom function - python functionudf_type 4: custom function - scala function

    2 linkis_ps_udf_manager#

    Administrator user table for udf functions, with sharing permissions; only udf administrators have a sharing entry on the front end.

    number | name | description | type | key | empty | extra | default value
    1 | id | | bigint(20) | PRI | NO | auto_increment |
    2 | user_name | | varchar(20) | | YES | |

    3 linkis_ps_udf_shared_info#

    udf shared record table

    number | name | description | type | key | empty | extra | default value
    1 | id | | bigint(20) | PRI | NO | auto_increment |
    2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | |
    3 | user_name | username used by the share | varchar(50) | | NO | |

    4 linkis_ps_udf_tree#

    Tree-level record table for udf classification

    number | name | description | type | key | empty | extra | default value
    1 | id | | bigint(20) | PRI | NO | auto_increment |
    2 | parent | parent category | bigint(20) | | NO | |
    3 | name | class name of the function | varchar(100) | | YES | |
    4 | user_name | username | varchar(50) | | NO | |
    5 | description | description information | varchar(255) | | YES | |
    6 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
    7 | update_time | | timestamp | | NO | | CURRENT_TIMESTAMP
    8 | category | category distinction: udf / function | varchar(50) | | YES | |

    5 linkis_ps_udf_user_load#

    Configuration of whether a udf is loaded by default

    number | name | description | type | key | empty | extra | default value
    1 | id | | bigint(20) | PRI | NO | auto_increment |
    2 | udf_id | id of linkis_ps_udf_baseinfo | int(11) | | NO | |
    3 | user_name | user owned | varchar(50) | | NO | |

    6 linkis_ps_udf_version#

    udf version information table

    number | name | description | type | key | empty | extra | default value
    1 | id | | bigint(20) | PRI | NO | auto_increment |
    2 | udf_id | id of linkis_ps_udf_baseinfo | bigint(20) | | NO | |
    3 | path | the local path of the uploaded script/jar package | varchar(255) | | NO | |
    4 | bml_resource_id | material resource id in bml | varchar(50) | | NO | |
    5 | bml_resource_version | bml material version | varchar(20) | | NO | |
    6 | is_published | whether to publish | bit(1) | | YES | |
    7 | register_format | registration format | varchar(255) | | YES | |
    8 | use_format | use format | varchar(255) | | YES | |
    9 | description | version description | varchar(255) | | NO | |
    10 | create_time | | timestamp | | NO | on update CURRENT_TIMESTAMP | CURRENT_TIMESTAMP
    11 | md5 | | varchar(100) | | YES | |

    ER diagram#

    image

    - + \ No newline at end of file diff --git a/docs/latest/tags/index.html b/docs/latest/tags/index.html index 297d2d7d7dc..69e69a4a453 100644 --- a/docs/latest/tags/index.html +++ b/docs/latest/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -15,7 +15,7 @@

    Tags

    - + \ No newline at end of file diff --git a/docs/latest/tuning_and_troubleshooting/configuration/index.html b/docs/latest/tuning_and_troubleshooting/configuration/index.html index 1305fc2a279..5d6811def54 100644 --- a/docs/latest/tuning_and_troubleshooting/configuration/index.html +++ b/docs/latest/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ Configurations | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Linkis1.0 Configurations

    The configuration of Linkis1.0 is simplified on the basis of Linkis0.x. A public configuration file linkis.properties is provided in the conf directory to avoid the need for common configuration parameters to be configured in multiple microservices at the same time. This document will list the parameters of Linkis1.0 in modules.

            Please note: this article only lists the Linkis configuration parameters that affect operating performance or depend on the environment. Many configuration parameters that users do not need to care about have been omitted; interested users can browse the source code.

    1 General configuration#

            The general configuration can be set in the global linkis.properties; set once, it takes effect for every microservice.
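
    For example, a property added once to the global file is picked up by every microservice after a restart; a minimal sketch assuming LINKIS_HOME is set:

    echo "wds.linkis.test.mode=false" >> ${LINKIS_HOME}/conf/linkis.properties
    sh ${LINKIS_HOME}/sbin/linkis-stop-all.sh && sh ${LINKIS_HOME}/sbin/linkis-start-all.sh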

    1.1 Global configurations#

    Parameter name | Default value | Description
    wds.linkis.encoding | utf-8 | Linkis default encoding format
    wds.linkis.date.pattern | yyyy-MM-dd'T'HH:mm:ssZ | Default date format
    wds.linkis.test.mode | false | Whether to enable debugging mode; if set to true, all microservices support password-free login and all EngineConns open remote debugging ports
    wds.linkis.test.user | None | When wds.linkis.test.mode=true, the default login user for password-free login
    wds.linkis.home | /appcom/Install/LinkisInstall | Linkis installation directory; if it does not exist, the value of LINKIS_HOME is used automatically
    wds.linkis.httpclient.default.connect.timeOut | 50000 | Linkis HttpClient default connection timeout

    1.2 LDAP configurations#

    Parameter name | Default value | Description
    wds.linkis.ldap.proxy.url | None | LDAP URL address
    wds.linkis.ldap.proxy.baseDN | None | LDAP baseDN address
    wds.linkis.ldap.proxy.userNameFormat | None |

    1.3 Hadoop configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.hadoop.root.user | hadoop | HDFS super user
    wds.linkis.filesystem.hdfs.root.path | None | User's HDFS default root path
    wds.linkis.keytab.enable | false | Whether to enable kerberos
    wds.linkis.keytab.file | /appcom/keytab | Kerberos keytab path, effective only when wds.linkis.keytab.enable=true
    wds.linkis.keytab.host.enabled | false |
    wds.linkis.keytab.host | 127.0.0.1 |
    hadoop.config.dir | None | If not configured, it will be read from the environment variable HADOOP_CONF_DIR
    wds.linkis.hadoop.external.conf.dir.prefix | /appcom/config/external-conf/hadoop | hadoop additional configuration

    1.4 Linkis RPC configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.rpc.broadcast.thread.num | 10 | Linkis RPC broadcast thread number (recommended default value)
    wds.linkis.ms.rpc.sync.timeout | 60000 | Default processing timeout of the Linkis RPC Receiver
    wds.linkis.rpc.eureka.client.refresh.interval | 1s | Refresh interval of the Eureka client's microservice list (recommended default value)
    wds.linkis.rpc.eureka.client.refresh.wait.time.max | 1m | Maximum refresh waiting time (recommended default value)
    wds.linkis.rpc.receiver.asyn.consumer.thread.max | 10 | Maximum number of Receiver Consumer threads (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.receiver.asyn.consumer.freeTime.max | 2m | Receiver Consumer maximum idle time
    wds.linkis.rpc.receiver.asyn.queue.size.max | 1000 | Maximum number of buffers in the receiver consumption queue (if there are many online users, it is recommended to increase this parameter appropriately)
    wds.linkis.rpc.sender.asyn.consumer.thread.max | 5 | Sender Consumer maximum number of threads
    wds.linkis.rpc.sender.asyn.consumer.freeTime.max | 2m | Sender Consumer maximum idle time
    wds.linkis.rpc.sender.asyn.queue.size.max | 300 | Sender consumption queue maximum buffer number

    2. Calculate governance configuration parameters#

    2.1 Entrance configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.spark.engine.version | 2.4.3 | The default Spark version used when the user submits a script without specifying a version
    wds.linkis.hive.engine.version | 1.2.1 | The default Hive version used when the user submits a script without specifying a version
    wds.linkis.python.engine.version | python2 | The default Python version used when the user submits a script without specifying a version
    wds.linkis.jdbc.engine.version | 4 | The default JDBC version used when the user submits a script without specifying a version
    wds.linkis.shell.engine.version | 1 | The default shell version used when the user submits a script without specifying a version
    wds.linkis.appconn.engine.version | v1 | The default AppConn version used when the user submits a script without specifying a version
    wds.linkis.entrance.scheduler.maxParallelismUsers | 1000 | Maximum number of concurrent users supported by Entrance
    wds.linkis.entrance.job.persist.wait.max | 5m | Maximum time for Entrance to wait for JobHistory to persist a Job
    wds.linkis.entrance.config.log.path | None | If not configured, the value of wds.linkis.filesystem.hdfs.root.path is used by default
    wds.linkis.default.requestApplication.name | IDE | The default submission system when no submission system is specified
    wds.linkis.default.runType | sql | The default script type when no script type is specified
    wds.linkis.warn.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere | Real-time WARN-level logs that are not output to the client by default
    wds.linkis.log.exclude | org.apache,hive.ql,hive.metastore,com.netflix,com.webank.wedatasphere,com.webank | Real-time INFO-level logs that are not output to the client by default
    wds.linkis.instance | 3 | User's default number of concurrent jobs per engine
    wds.linkis.max.ask.executor.time | 5m | Maximum time to ask LinkisManager for an available EngineConn
    wds.linkis.hive.special.log.include | org.apache.hadoop.hive.ql.exec.Task | When pushing Hive logs to the client, the logs that are not filtered by default
    wds.linkis.spark.special.log.include | org.apache.linkis.engine.spark.utils.JobProgressUtil | When pushing Spark logs to the client, the logs that are not filtered by default
    wds.linkis.entrance.shell.danger.check.enabled | false | Whether to check and block dangerous shell syntax
    wds.linkis.shell.danger.usage | rm,sh,find,kill,python,for,source,hdfs,hadoop,spark-sql,spark-submit,pyspark,spark-shell,hive,yarn | Default dangerous shell syntax
    wds.linkis.shell.white.usage | cd,ls | Shell whitelist syntax
    wds.linkis.sql.default.limit | 5000 | Default maximum number of rows in a returned SQL result set

    2.2 EngineConn configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engineconn.resultSet.default.store.path | hdfs:///tmp | Default storage path for job result sets
    wds.linkis.engine.resultSet.cache.max | 0k | When the result set is smaller than this size, EngineConn returns it directly to Entrance without writing it to disk
    wds.linkis.engine.default.limit | 5000 |
    wds.linkis.engine.lock.expire.time | 120000 | Maximum idle time of the engine lock, i.e. how long after Entrance applies for the lock it is released if no code is submitted to the EngineConn
    wds.linkis.engineconn.ignore.words | org.apache.spark.deploy.yarn.Client | Logs ignored by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.pass.words | org.apache.hadoop.hive.ql.exec.Task | Logs that must be pushed by default when the Engine pushes logs to the Entrance side
    wds.linkis.engineconn.heartbeat.time | 3m | Default heartbeat interval from EngineConn to LinkisManager
    wds.linkis.engineconn.max.free.time | 1h | EngineConn's maximum idle time

    2.3 EngineConnManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.ecm.memory.max | 80g | Maximum total memory the ECM can use to start EngineConns
    wds.linkis.ecm.cores.max | 50 | Maximum total number of CPU cores the ECM can use to start EngineConns
    wds.linkis.ecm.engineconn.instances.max | 50 | Maximum number of EngineConns that can be started; it is generally recommended to set this to the same value as wds.linkis.ecm.cores.max
    wds.linkis.ecm.protected.memory | 4g | ECM protected memory, i.e. the memory the ECM uses to start EngineConns cannot exceed wds.linkis.ecm.memory.max - wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.cores.max | 2 | Number of protected CPU cores of the ECM; the meaning is analogous to wds.linkis.ecm.protected.memory
    wds.linkis.ecm.protected.engine.instances | 2 | Number of protected EngineConn instances of the ECM
    wds.linkis.engineconn.wait.callback.pid | 3s | Maximum time to wait for an EngineConn to return its pid
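
    To make the protected-resource rule concrete, a quick sketch of the usable capacity under the defaults above (a simplification; real accounting also depends on each engine's resource request):

        # Usable ECM capacity with the defaults above (sketch)
        memory_max=80; protected_memory=4
        cores_max=50;  protected_cores=2
        echo "usable memory: $((memory_max - protected_memory))g"   # 76g
        echo "usable cores:  $((cores_max - protected_cores))"      # 48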

    2.4 LinkisManager configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.manager.am.engine.start.max.time | 10m | Maximum time for LinkisManager to start a new EngineConn
    wds.linkis.manager.am.engine.reuse.max.time | 5m | Maximum time for LinkisManager to select an existing EngineConn for reuse
    wds.linkis.manager.am.engine.reuse.count.limit | 10 | Maximum number of polling attempts when LinkisManager reuses an existing EngineConn
    wds.linkis.multi.user.engine.types | jdbc,es,presto | Engine types that do not use the user as part of the reuse rule when LinkisManager reuses an existing EngineConn (multi-user engines)
    wds.linkis.rm.instance | 10 | Default maximum number of instances per user per engine
    wds.linkis.rm.yarnqueue.cores.max | 150 | Maximum number of cores per user in each engine's Yarn queue
    wds.linkis.rm.yarnqueue.memory.max | 450g | Maximum amount of memory per user in each engine's Yarn queue
    wds.linkis.rm.yarnqueue.instance.max | 30 | Maximum number of applications each user may launch in each engine's Yarn queue

    3. Each engine configuration parameter#

    3.1 JDBC engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jdbc.default.limit | 5000 | Default maximum number of rows returned in a result set
    wds.linkis.jdbc.support.dbs | mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver,oracle=>oracle.jdbc.driver.OracleDriver,hive2=>org.apache.hive.jdbc.HiveDriver,presto=>com.facebook.presto.jdbc.PrestoDriver | Drivers supported by the JDBC engine
    wds.linkis.engineconn.jdbc.concurrent.limit | 100 | Maximum number of concurrent SQL executions
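
    The wds.linkis.jdbc.support.dbs value is a comma-separated list of dbType=>driverClass pairs. A sketch of narrowing it to only the drivers you ship; the pairs below are taken from the default above:

        # Keep only the MySQL and PostgreSQL drivers (dbType=>driverClass pairs)
        echo "wds.linkis.jdbc.support.dbs=mysql=>com.mysql.jdbc.Driver,postgresql=>org.postgresql.Driver" \
          >> "$LINKIS_HOME/conf/linkis.properties"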

    3.2 Python engine configuration parameters#

    Parameter name | Default value | Description
    pythonVersion | /appcom/Install/anaconda3/bin/python | Python command path
    python.path | None | Additional path for Python; only shared storage paths are accepted

    3.3 Spark engine configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.engine.spark.language-repl.init.time | 30s | Maximum initialization time for the Scala and Python command interpreters
    PYSPARK_DRIVER_PYTHON | python | Python command path
    wds.linkis.server.spark-submit | spark-submit | spark-submit command path

    4. PublicEnhancements configuration parameters#

    4.1 BML configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.bml.dws.version | v1 | Version number of Linkis Restful requests
    wds.linkis.bml.auth.token.key | Validation-Code | Password-free token-key for BML requests
    wds.linkis.bml.auth.token.value | BML-AUTH | Password-free token-value for BML requests
    wds.linkis.bml.hdfs.prefix | /tmp/linkis | Prefix path of BML files stored on HDFS

    4.2 Metadata configuration parameters#

    Parameter name | Default value | Description
    hadoop.config.dir | /appcom/config/hadoop-config | If it does not exist, the value of the environment variable HADOOP_CONF_DIR is used by default
    hive.config.dir | /appcom/config/hive-config | If it does not exist, the value of the environment variable HIVE_CONF_DIR is used by default
    hive.meta.url | None | URL of the HiveMetaStore database; must be configured if hive.config.dir is not configured
    hive.meta.user | None | User of the HiveMetaStore database
    hive.meta.password | None | Password of the HiveMetaStore database
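
    If hive.config.dir is not available, the HiveMetaStore database connection can be supplied directly. A minimal sketch; host, database name and credentials are placeholders for your own environment:

        # Placeholder HiveMetaStore DB connection (replace host/user/password)
        {
          echo "hive.meta.url=jdbc:mysql://127.0.0.1:3306/hive_meta?useUnicode=true"
          echo "hive.meta.user=hive"
          echo "hive.meta.password=hive_password"
        } >> "$LINKIS_HOME/conf/linkis.properties"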

    4.3 JobHistory configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.jobhistory.admin | None | Default admin account, used to specify which users can view everyone's execution history

    4.4 FileSystem configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.filesystem.root.path | file:///tmp/linkis/ | User's local Linux root directory
    wds.linkis.filesystem.hdfs.root.path | hdfs:///tmp/ | User's HDFS root directory
    wds.linkis.workspace.filesystem.hdfsuserrootpath.suffix | /linkis/ | First-level directory appended after the user's HDFS root directory; the user's actual root directory is ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
    wds.linkis.workspace.resultset.download.is.limit | true | Whether to limit the number of rows when the client downloads a result set
    wds.linkis.workspace.resultset.download.maxsize.csv | 5000 | Maximum number of rows when a result set is downloaded as a CSV file
    wds.linkis.workspace.resultset.download.maxsize.excel | 5000 | Maximum number of rows when a result set is downloaded as an Excel file
    wds.linkis.workspace.filesystem.get.timeout | 2000L | Maximum timeout for requests to the underlying file system (if your HDFS or Linux machine performs poorly, it is recommended to increase this value appropriately)
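
    A quick sketch of how the user's actual HDFS root directory is assembled from the defaults above, for a hypothetical user hadoop:

        # ${hdfs.root.path}/${user}/${hdfsuserrootpath.suffix}
        # hdfs:///tmp/ + hadoop + /linkis/  =>  hdfs:///tmp/hadoop/linkis/
        hdfs dfs -ls hdfs:///tmp/hadoop/linkis/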

    4.5 UDF configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.udf.share.path | /mnt/bdap/udf | Storage path of shared UDFs; it is recommended to set this to an HDFS path

    5. MicroService configuration parameters#

    5.1 Gateway configuration parameters#

    Parameter name | Default value | Description
    wds.linkis.gateway.conf.enable.proxy.user | false | Whether to enable proxy user mode; if enabled, the login user's requests are proxied to the proxy user for execution
    wds.linkis.gateway.conf.proxy.user.config | proxy.properties | Storage file of proxy rules
    wds.linkis.gateway.conf.proxy.user.scan.interval | 600000 | Refresh interval of the proxy file
    wds.linkis.gateway.conf.enable.token.auth | false | Whether to enable Token login mode; if enabled, access to Linkis in the form of tokens is allowed
    wds.linkis.gateway.conf.token.auth.config | token.properties | Storage file of Token rules
    wds.linkis.gateway.conf.token.auth.scan.interval | 600000 | Refresh interval of the Token file
    wds.linkis.gateway.conf.url.pass.auth | /dws/ | URL prefixes that are passed through by default without login verification
    wds.linkis.gateway.conf.enable.sso | false | Whether to enable SSO user login mode
    wds.linkis.gateway.conf.sso.interceptor | None | If SSO login mode is enabled, the user needs to implement SSOInterceptor to jump to the SSO login page
    wds.linkis.admin.user | hadoop | Administrator user list
    wds.linkis.login_encrypt.enable | false | Whether passwords are transmitted with RSA encryption when the user logs in
    wds.linkis.enable.gateway.auth | false | Whether to enable the Gateway IP whitelist mechanism
    wds.linkis.gateway.auth.file | auth.txt | IP whitelist storage file
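
    A hedged sketch of enabling Token login mode; the token name and user list below are hypothetical, and note that from Linkis 1.1.1 onwards Token rules are read from a database table rather than the configuration file:

        # Enable token auth (token name and user list are hypothetical)
        echo "wds.linkis.gateway.conf.enable.token.auth=true" >> "$LINKIS_HOME/conf/linkis.properties"
        # token.properties maps a token to the users allowed to present it
        echo "MY-EXAMPLE-TOKEN=hadoop,alice" >> "$LINKIS_HOME/conf/token.properties"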

    6. DataSource and Metadata Service configuration parameters#

    6.1 MetaData Service configuration parameters#

    From Version | Parameter name | Default value | Description
    v1.1.0 | wds.linkis.server.mdm.service.lib.dir | /lib/linkis-pulicxxxx-/linkis-metdata-manager/service | Specify the relative path of the service to be loaded
    v1.1.0 | wds.linkis.server.mdm.service.instance.expire-in-seconds | 60 | Service loading timeout in seconds; if exceeded, the service is not loaded
    v1.1.0 | wds.linkis.server.dsm.app.name | linkis-ps-data-source-manager | Service used to get the data source
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.principle | hadoop/_HOST@EXAMPLE.COM | Kerberos principal for the linkis-metadata Hive service
    v1.1.0 | wds.linkis.server.mdm.service.user | hadoop | User for the linkis-metadata Hive service
    v1.1.0 | wds.linkis.server.mdm.service.kerberos.krb5.path | "" | Kerberos krb5 path for the linkis-metadata Hive service
    v1.1.0 | wds.linkis.server.mdm.service.temp.location | classpath:/tmp | Temp location for the linkis-metadata Hive and Kafka services
    v1.1.0 | wds.linkis.server.mdm.service.sql.driver | com.mysql.jdbc.Driver | Driver for the hive-metadata MySQL service
    v1.1.0 | wds.linkis.server.mdm.service.sql.url | jdbc:mysql://%s:%s/%s | URL format for the hive-metadata MySQL service
    v1.1.0 | wds.linkis.server.mdm.service.sql.connect.timeout | 3000 | MySQL connection timeout (ms) for the hive-metadata MySQL service
    v1.1.0 | wds.linkis.server.mdm.service.sql.socket.timeout | 6000 | Socket open timeout (ms) for the hive-metadata MySQL service
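
    The three %s placeholders in wds.linkis.server.mdm.service.sql.url are substituted with the data source's host, port and database name at request time. A sketch of the substitution with placeholder values:

        # jdbc:mysql://%s:%s/%s  ->  host, port, database
        printf 'jdbc:mysql://%s:%s/%s\n' 127.0.0.1 3306 test_db
        # => jdbc:mysql://127.0.0.1:3306/test_db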
    - + \ No newline at end of file diff --git a/docs/latest/tuning_and_troubleshooting/overview/index.html b/docs/latest/tuning_and_troubleshooting/overview/index.html index ad1ae084cc8..c8b13b2cf14 100644 --- a/docs/latest/tuning_and_troubleshooting/overview/index.html +++ b/docs/latest/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -17,7 +17,7 @@ The compatibility of the os version is the best, and some system versions may have command incompatibility. For example, the poor compatibility of yum in ubantu may cause yum-related errors in the installation and deployment. In addition, it is also recommended not to use windows as much as possible. Deploying linkis, currently no script is fully compatible with the .bat command.

  • Missing configuration items: there are two configuration files that need to be modified in Linkis 1.0, linkis-env.sh and db.sh

    The former contains the environment parameters that Linkis needs to load at runtime, and the latter contains the connection information for the database in which Linkis stores its own tables. Under normal circumstances, if a configuration item is missing, the error message will point to the missing key. For example, when db.sh is missing the database configuration, an error like "unknown mysql server host '-P'" appears, which is caused by the missing host (see the sketch below).
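
    A minimal sketch of the database entries expected in db.sh; all values are placeholders for your own environment:

        # deploy-config/db.sh (placeholder values)
        MYSQL_HOST=127.0.0.1
        MYSQL_PORT=3306
        MYSQL_DB=linkis
        MYSQL_USER=linkis_user
        MYSQL_PASSWORD=linkis_password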

  • Report error when starting microservice

    Linkis puts the log files of all microservices into the logs directory. The log directory levels are as follows:

    ├── linkis-computation-governance
    │   ├── linkis-cg-engineconnmanager
    │   ├── linkis-cg-engineplugin
    │   ├── linkis-cg-entrance
    │   └── linkis-cg-linkismanager
    ├── linkis-public-enhancements
    │   ├── linkis-ps-bml
    │   ├── linkis-ps-cs
    │   ├── linkis-ps-datasource
    │   └── linkis-ps-publicservice
    └── linkis-spring-cloud-services
        ├── linkis-mg-eureka
        └── linkis-mg-gateway

    It includes three microservice modules: computing governance, public enhancement, and microservice management. Each microservice contains three logs, linkis-gc.log, linkis.log, and linkis.out, corresponding to the service's GC log, service log, and service System.out log.

    Under normal circumstances, when an error occurs when starting a microservice, you can cd to the corresponding service in the log directory to view the related log to troubleshoot the problem. Generally, the most frequently occurring problems can also be divided into three categories:

    1. Port Occupation: Since the default ports of Linkis microservices are mostly concentrated around 9000, it is necessary to check whether the port of each microservice is occupied by another process before starting. If it is occupied, change the corresponding microservice port in the conf/linkis-env.sh file (see the sketch after this list).

    2. Necessary configuration parameters are missing: some microservices must load certain user-defined parameters before they can start normally. For example, the linkis-cg-engineplugin microservice loads the wds.linkis.engineconn.*-related configuration in conf/linkis.properties at startup; if the user changes the Linkis path after installation without updating this configuration accordingly, an error will be reported when the linkis-cg-engineplugin microservice starts.

    3. System environment is not compatible: when deploying and installing, it is recommended that users follow the recommended system and application versions in the official documents as much as possible, and install necessary system plug-ins such as expect, yum, etc. Incompatible application versions may cause application-related errors; for example, SQL statement incompatibilities in MySQL 5.7 may cause errors in the linkis.ddl and linkis.dml files when initializing the db during installation. Refer to the "Q&A Problem Summary" or the deployment documentation for the corresponding settings.
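
    For the port-occupation case in item 1 above, a hedged sketch of checking a port before startup (9001 is just an example; use the port of the service you are starting):

        # Check whether an example port is free before starting the service
        PORT=9001
        if lsof -i :"$PORT" >/dev/null 2>&1; then
            echo "port $PORT is occupied; change it in conf/linkis-env.sh"
        fi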

  • Report error during microservice execution period

    Errors during microservice execution are more complicated, and the situations encountered differ by environment, but the troubleshooting method is basically the same. Starting from the corresponding microservice's error catalog, we can roughly divide them into three situations:

    1. Manually installed and deployed microservices report errors: The logs of this type of microservice are unified under the log/ directory. After locating the microservice, enter the corresponding directory to view it.

    2. Engine start failure ("insufficient resources, request engine failure"): when this type of error occurs, it is not necessarily due to insufficient resources, because the front end only fetches logs produced after the Spring application has started; errors raised before the engine starts cannot be fetched well. Three high-frequency problems were found in actual use by internal test users:

      a. The engine cannot be created because there is no permission on the engine directory: the log is printed to the linkis.out file under the cg-engineconnmanager microservice; open the file to find the specific reason.

      b. A dependency conflict in the engine lib package, or the server cannot start normally because of insufficient memory resources: since the engine directory has already been created, the log is printed to the stdout file under the engine directory; for the engine path, refer to item c.

      c. Errors reported during engine execution: each started engine is a microservice that is dynamically loaded and started at runtime. If an error occurs when the engine starts, find the engine's log in the corresponding startup user directory; the root path is the ENGINECONN_ROOT_PATH filled in linkis-env.sh before installation. If you need to change the path after installation, modify wds.linkis.engineconn.root.dir in linkis.properties (see the sketch below).
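
    For item c, a sketch of locating a started engine's logs under the EngineConn root path; the exact directory layout under the user directory varies by version, so the search below is deliberately loose:

        # ENGINECONN_ROOT_PATH as filled in linkis-env.sh before installation (placeholder)
        ENGINECONN_ROOT_PATH=/appcom/tmp
        # Find stdout logs of engines started for user hadoop
        find "$ENGINECONN_ROOT_PATH/hadoop" -name stdout 2>/dev/null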

    Ⅴ. Community user group consultation and communication#

    For problems that cannot be located and resolved by following the process above during installation and deployment, you can send the error messages to our community group. To make it easier for community members and developers to help, and to improve efficiency, it is recommended that when you ask a question you describe the problem, attach the related log information, and mention the places you have already checked; if you suspect an environment problem, also list the corresponding application versions. We provide two online groups: a WeChat group and a QQ group. The communication channels and specific contact information can be found at the bottom of the Linkis GitHub homepage.

    Ⅵ. Locate the source code by remote debugging#

    Under normal circumstances, remote debugging of the source code is the most effective way to locate problems, but compared with document review it requires a certain understanding of the source code structure. It is recommended to check the detailed Linkis source code structure in the Linkis WIKI before remote debugging. Once reasonably familiar with the project's source code structure, you can refer to How to Debug Linkis (a generic sketch follows).
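
    As a starting point, a generic, hedged sketch of enabling JVM remote debugging for a single microservice before attaching an IDE debugger; the exact startup script hook differs across Linkis versions, and port 5005 is an arbitrary choice:

        # Illustrative JDWP options for remote-debugging one Linkis microservice
        export JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=5005"
        # Restart the target service with these options, then attach the IDE debugger to port 5005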

    - + \ No newline at end of file diff --git a/docs/latest/tuning_and_troubleshooting/tuning/index.html b/docs/latest/tuning_and_troubleshooting/tuning/index.html index b08ee12b054..9872370cca7 100644 --- a/docs/latest/tuning_and_troubleshooting/tuning/index.html +++ b/docs/latest/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ Tuning | Apache Linkis - + @@ -16,7 +16,7 @@ override def getOrCreateGroup(groupName: String): Group = { if (!groupNameToGroups.containsKey(groupName)) synchronized { val initCapacity = 100 val maxCapacity = 100 // other codes... } }

    4. Resource settings related to task runtime#

    When a task is submitted to run on Yarn, Yarn provides a configurable interface; as a highly extensible framework, Linkis can likewise be configured to set the task's resource configuration.

    The related configuration of Spark and Hive are as follows:

    Part of the Spark configuration is in linkis-engineconn-plugins/engineconn-plugins; you can adjust this configuration to change the runtime environment of tasks submitted to Yarn. Due to limited space, for more details (such as the Hive and Yarn configuration), users should refer to the source code and the parameters documentation.

        "spark.driver.memory" = 2 //Unit is G    "wds.linkis.driver.cores" = 1    "spark.executor.memory" = 4 //Unit is G    "spark.executor.cores" = 2    "spark.executor.instances" = 3    "wds.linkis.rm.yarnqueue" = "default"
    - + \ No newline at end of file diff --git a/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 5b0d8bd2866..2cb73e166d0 100644 --- a/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ Upgrade From 0.X To 1.0 Guide | Apache Linkis - + @@ -16,7 +16,7 @@ Please input the choice: ## choice 1

    3. Database upgrade#

         After the service is installed, the database structure needs to be modified, including table structure changes and new tables and data:

    3.1 Table structure modification part:#

         linkis_task: The submit_user and label_json fields are added to the table. The update statement is:

    ALTER TABLE linkis_task ADD submit_user varchar(50) DEFAULT NULL COMMENT 'submitUser name';
    ALTER TABLE linkis_task ADD `label_json` varchar(200) DEFAULT NULL COMMENT 'label json';

    3.2 New SQL that needs to be executed:#

    cd db/module
    ## Add the tables that the enginePlugin service depends on:
    source linkis_ecp.sql
    ## Add the table that the public service-instanceLabel service depends on
    source linkis_instance_label.sql
    ## Add the tables that the linkis-manager service depends on
    source linkis_manager.sql

    3.3 Publicservice-Configuration table modification#

         In order to support the full labeling capability of Linkis 1.X, all data tables related to the configuration module have been upgraded to labeling, which is completely different from the 0.X Configuration tables. The table creation statements and initialization statements need to be re-executed.

         This means that the existing engine configuration parameters of Linkis 0.X users can no longer be migrated to Linkis 1.0 (it is recommended that users reconfigure the engine parameters once).

         The execution of the table building statement is as follows:

    source linkis_configuration.sql

         Because Linkis 1.0 supports multiple versions of the engine, it is necessary to modify the version of the engine when executing the initialization statement, as shown below:

    vim linkis_configuration_dml.sql
    ## Modify the default version of the corresponding engine
    SET @SPARK_LABEL="spark-2.4.3";
    SET @HIVE_LABEL="hive-1.2.1";
    ## Execute the initialization statement
    source linkis_configuration_dml.sql

    4. Installation and startup Linkis1.0#

         Start Linkis 1.0 to verify whether the service has been started normally and provide external services. For details, please refer to: Quick Deployment Linkis1.0

    - + \ No newline at end of file diff --git a/docs/latest/upgrade/upgrade_guide/index.html b/docs/latest/upgrade/upgrade_guide/index.html index 452237239cd..9dc9c45fb66 100644 --- a/docs/latest/upgrade/upgrade_guide/index.html +++ b/docs/latest/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ Version upgrades above 1.0.3 | Apache Linkis - + @@ -34,7 +34,7 @@ Linkis' nginx configuration file is by default in /etc/nginx/conf.d/dss.conf

    #Example
    server {
        ......
        location dss/linkis {
            alias /appcom/Install/linkis-web-newversion/dist; # static file directory
            index index.html index.html;
        }
        ......
    }

    Reload nginx configuration

    sudo nginx -s reload

    5.3 Notes#

    • After the management console is upgraded, because the browser may have a cache, if you want to verify the effect, it is best to clear the browser cache
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/console_manual/index.html b/docs/latest/user_guide/console_manual/index.html index c4ee3c00a0c..a3842ca64c7 100644 --- a/docs/latest/user_guide/console_manual/index.html +++ b/docs/latest/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Console User Manual | Apache Linkis - + @@ -15,7 +15,7 @@
    Version: 1.1.2

    Console User Manual

    Linkis 1.0 has added a new Computation Governance Console page, which provides users with an interactive UI for viewing the execution of Linkis tasks, custom parameter configuration, engine health status, resource surplus, etc., thereby simplifying user development and management efforts.

    1. Structure of Computation Governance Console#

    The Computation Governance Console is mainly composed of the following functional pages:

    • Global History
    • Resource Management
    • Parameter Configuration
    • Global Variables
    • ECM Management (Only visible to linkis computing management console administrators)
    • Microservice Management (Only visible to linkis computing management console administrators)

    Global history, resource management, parameter configuration, and global variables are visible to all users, while ECM management and microservice management are only visible to linkis computing management console administrators.

    The administrator of the Linkis Computation Governance Console can be configured via the following parameter in linkis.properties:

    wds.linkis.governance.station.admin=hadoop (multiple administrator usernames are separated by ',')

    2. Global history#

    The global history interface provides the user's own Linkis task submission records. The execution status of each task is displayed here, and the reason for a task's failure can be queried by clicking the view button on the left side of the task.

    ./media/image2.png

    ./media/image3.png

    For Linkis Computation Governance Console administrators, the historical tasks of all users can be viewed by clicking "switch to administrator view" on the page.

    ./media/image4.png

    3. Resource management#

    In the resource management interface, the user can see the status of currently started engines and their resource occupation, and can also stop engines through the page.

    ./media/image5.png

    4. Parameter configuration#

    The parameter configuration interface provides the function of user-defined parameter management. The user can manage the related configuration of the engine in this interface, and the administrator can add application types and engines here.

    ./media/image6.png

    The user can expand all the configuration information in the directory by clicking on an application type at the top and then selecting the engine type in the application; after modifying the configuration information, click "Save" for it to take effect.

    Editing catalogs and creating application types are only visible to the administrator. Click the edit button to delete an existing application and engine configuration (note: directly deleting an application will delete all engine configurations under it and cannot be restored), to add an engine, or click "New Application" to add a new application type.

    ./media/image7.png

    ./media/image8.png

    5. Global variable#

    In the global variable interface, users can customize variables for code writing; just click the edit button to add parameters.

    ./media/image9.png

    6. ECM management#

    The ECM management interface is used by the administrator to manage the ECMs and all engines. On this interface you can view ECM status information, modify ECM label information, modify ECM status, and query all engine information under each ECM. It is visible only to the administrator; the administrator configuration method is described in chapter 2 of this article.

    ./media/image10.png

    Click the edit button to edit the label information of the ECM (only part of the labels are allowed to be edited) and modify the status of the ECM.

    ./media/image11.png

    Click the instance name of the ECM to view all engine information under the ECM.

    Similarly, you can stop the engine on this interface, and edit the label information of the engine.

    7. Microservice management#

    The microservice management interface shows all microservice information under Linkis, and it is only visible to the administrator. Linkis's own microservices can be viewed by clicking the Eureka registration center; the microservices associated with Linkis are listed directly on this interface.

    - + \ No newline at end of file diff --git a/docs/latest/user_guide/how_to_use/index.html b/docs/latest/user_guide/how_to_use/index.html index 724f3d8e349..ff3c0045a64 100644 --- a/docs/latest/user_guide/how_to_use/index.html +++ b/docs/latest/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ How to Use | Apache Linkis - + @@ -18,7 +18,7 @@ DSS Run Workflow

    - + \ No newline at end of file diff --git a/docs/latest/user_guide/linkis-datasource-client/index.html b/docs/latest/user_guide/linkis-datasource-client/index.html index 9b508b6bf10..d57f346fb3a 100644 --- a/docs/latest/user_guide/linkis-datasource-client/index.html +++ b/docs/latest/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK | Apache Linkis - + @@ -31,7 +31,7 @@ def testMetadataGetDatabases(client:LinkisMetaDataRemoteClient): Unit ={ client.getDatabases(MetadataGetDatabasesAction.builder().setUser("hadoop").setDataSourceId(9l).setUser("hadoop").setSystem("client").build()).getDbs }}
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/linkiscli_manual/index.html b/docs/latest/user_guide/linkiscli_manual/index.html index 1a07728c1a0..20483f080cb 100644 --- a/docs/latest/user_guide/linkiscli_manual/index.html +++ b/docs/latest/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli Manual | Apache Linkis - + @@ -16,7 +16,7 @@

    Note:

    1. variableMap does not support configuration

    2. When there is a conflict between the configured key and the key entered in the command parameter, the priority is as follows:

      Instruction Parameters > Key in Instruction Map Type Parameters > User Configuration > Default Configuration

    Example:

    Configure engine startup parameters:

        wds.linkis.client.param.conf.spark.executor.instances=3
        wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

    Configure labelMap parameters:

       wds.linkis.client.label.myLabel=label123

    6. Output result set to file#

    Use the -outPath parameter to specify an output directory; linkis-cli will then output the result sets to files, one file per result set. The output naming format is as follows:

        task-[taskId]-result-[idx].txt    

    E.g.:

        task-906-result-1.txt    task-906-result-2.txt    task-906-result-3.txt
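
    Putting it together, a hedged sketch of a linkis-cli submission that writes its result sets into a local directory; the engine type, code and user names are illustrative:

        # Illustrative submission; flags other than -outPath follow the linkis-cli manual
        sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql \
            -code "select 1" -submitUser hadoop -proxyUser hadoop \
            -outPath ./output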
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/overview/index.html b/docs/latest/user_guide/overview/index.html index 6213539d9de..b2c6b3cc510 100644 --- a/docs/latest/user_guide/overview/index.html +++ b/docs/latest/user_guide/overview/index.html @@ -7,7 +7,7 @@ Overview | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/sdk_manual/index.html b/docs/latest/user_guide/sdk_manual/index.html index 4b40594de69..c9830eec90e 100644 --- a/docs/latest/user_guide/sdk_manual/index.html +++ b/docs/latest/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK Manual | Apache Linkis - + @@ -42,7 +42,7 @@ }
    - + \ No newline at end of file diff --git a/docs/latest/user_guide/udf/index.html b/docs/latest/user_guide/udf/index.html index 838224e17f6..a4574b63a3c 100644 --- a/docs/latest/user_guide/udf/index.html +++ b/docs/latest/user_guide/udf/index.html @@ -7,7 +7,7 @@ Use of UDFs | Apache Linkis - + @@ -20,7 +20,7 @@ Prerequisite: The sharing function needs to be used by the user as an administrator, otherwise the front-end page will not provide an operation entry.

    Click the share button of a UDF: a content box will pop up; enter the list of users you want to share with (comma separated).

    Note: After sharing to others, others need to actively load the UDF before using it.

    After sharing, the shared user can find it in "Shared Function", check the load and use it.

    5 Introduction of other functions#

    5.1 UDF handover#

    For example, when the user leaves the company, it may be necessary to hand over personal udf to others. Click the Handover button, select your handover object, and click OK.

    5.2 UDF Expiration#

    For a UDF shared with others, if it has been loaded by a shared user, the UDF cannot be deleted directly; it can only be marked as expired. For the time being this is only a marker and does not affect use.

    5.3 UDF version list#

    Click the "version list" button of a udf to view all versions of the udf. The following features are provided for each version:

    Create a new version: Copy the corresponding version to the latest version.

    Download: Download the udf file from bml to the local.

    View the source code: For the python/scala script type, you can directly view the source code, but the jar type is not supported.

    Publish: for a shared UDF, you can publish a certain version so that it takes effect for the shared users. Note: shared users use the latest published version of the UDF, while individual users always use the latest version.

    - + \ No newline at end of file diff --git a/download/download-logo/index.html b/download/download-logo/index.html index 2af9371cea0..5de940c7397 100644 --- a/download/download-logo/index.html +++ b/download/download-logo/index.html @@ -7,7 +7,7 @@ Download Logo | Apache Linkis - + @@ -15,7 +15,7 @@
    - + \ No newline at end of file diff --git a/download/main/index.html b/download/main/index.html index 4650f965f41..2bc35167998 100644 --- a/download/main/index.html +++ b/download/main/index.html @@ -7,7 +7,7 @@ Release List | Apache Linkis - + @@ -23,7 +23,7 @@

    For detailed guidelines, please refer to: Compilation and Packaging Guidelines

    - + \ No newline at end of file diff --git a/download/release-notes-1.0.2/index.html b/download/release-notes-1.0.2/index.html index 79ed7dca46f..d41c46b42b1 100644 --- a/download/release-notes-1.0.2/index.html +++ b/download/release-notes-1.0.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.2 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.0.2

    This is a non-ASF version.

    Linkis-1.0.2 includes all of Project Linkis-1.0.2.

    This release mainly introduces Flink-support into Linkis ecosystem.

    The following key features are added:

    • Flink-EngineConn, which offers solid support for Flink jobs. Executing, debugging and monitoring Flink SQL or applications is now available, together with the SQL-enhancement ability powered by Linkis Orchestrator.
    • LinkisManagerClient, which enables direct access to LinkisManager. Submitting and managing OnceJobs relies on this feature.

    Abbreviations:

    CGS: Computation Governance Services

    PES: Public Enhancement Services

    MGS: Microservice Governance Services


    New Feature#

    EngineConn#

    • Linkis-936 [CGS-LinkisOnceEngineconn] supports OnceEngineExecutor

    EnginePlugin#

    • Linkis-935 [CGS-EngineConnPlugin-Flink] supports Flink EngineConn
    • Linkis-947 [CGS-EngineConnPlugin-Flink] supports executing Flink SQL and Flink applications
    • Linkis-948 [CGS-EngineConnPlugin-Flink] multiple-datasource support for Flink EngineConn
    • Linkis-949 [CGS-EngineConnPlugin-Flink] monitoring Flink Metrics

    ComputationClient#

    • Linkis-937 [CGS-LinkisComputationClient] supports OnceEngineExecutor client

    Enhancement#

    • Linkis-953 [CGS-LinkisManager] label supports '-' in hostname
    • Linkis-925 [MGS-LinkisServiceGateway] fix weak password in linkis gateway
    • Linkis-950 [CGS-LinkisEngineConnManager] support both ip address and hostname for service discovery
    • Linkis-967 [CGS-LinkisEntrance] remove the instance-label client dependency, solve the hostname and IP judgment abnormality in the gateway router, and exclude the pom dependency on the pentaho-aggdesigner-algorithm jar
    • Linkis-963 [PES-LinkisBmlServer] the default download user is changed to the JVM user, and setting the default download user by configuration is supported

    Bugs Fix#

    • Linkis-938 [CGS-LinkisManager] fixes a serial execution bug
    • Linkis-952 [CGS-LinkisEngineConn] fixes a redundant thread bug
    • Linkis-943 [CGS-EngineConnPlugin-Hive] fixes a Hive3.0 compilation error
    • Linkis-961 [CGS-EngineConnPlugin-Flink] fixes a Flink-EnginePlugin compilation bug
    • Linkis-966 [CGS-EngineConnPlugin-Hive][CGS-EnginePlugin-Spark] Solve Spark and hive compatibility issue

    Credits#

    The release of Linkis 1.0.2 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors!

    - + \ No newline at end of file diff --git a/download/release-notes-1.0.3/index.html b/download/release-notes-1.0.3/index.html index 63645f4bdf9..f679b948139 100644 --- a/download/release-notes-1.0.3/index.html +++ b/download/release-notes-1.0.3/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.3 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.0.3

    Apache Linkis(incubating) 1.0.3 includes all of Project Linkis-1.0.3.

    This version is the first version of Linkis entering Apache incubation. It mainly completes the ASF infrastructure construction, including license improvement/package name modification, etc. In addition, features such as EngineConn support for Operators are added, and bugs in version 1.0.2 reported by the community are fixed.

    The following key features are added:

    • Deprecate Jersey and use Spring MVC to build HTTP RESTful APIs
    • Replace codehaus json with fastxml json
    • Support EngineConn/OnceEngineConn common operators
    • Support proxy user with kerberos

    Abbreviations:

    • CGS: Computation Governance Services
    • PES: Public Enhancement Services
    • MGS: Microservice Governance Services
    • EC: Engineconn
    • ECM: EngineConnManager

    New Feature#

    • [CGS&PES&MGS][Linkis-1002] Deprecate Jersey and use Spring MVC to build HTTP RESTful APIs, use spring's DispatcherServlet and unify the annotations of the web interface
    • [CGS&PES&MGS][Linkis-1038] Upgrade codehaus json to stable fastxml json
    • [CGS-Engineconn][Linkis-1027] Support for accessing kerberos-enabled Hadoop clusters using Hadoop's proxy-user mechanism
    • [CGS-EngineConnManager][Linkis-1248] Support ECM to obtain all logs of EC when EC is running or stopped
    • [CGS-LinkisManager][Linkis-1043] Support engine operator, the client can perform specific operations on EngineConn through the engine operator and return result
    • [CGS-LinkisOnceEngineconn][Linkis-946] Support hostname and IP address for eureka service discovery and service invocation to meet containerized deployment scenarios such as k8s
    • [CGS-LinkisOnceEngineconn][Linkis-1078] Support EngineConn/OnceEngineConn general operator, providing basic capabilities for integrating with streaming computing engines

    Enhancement#

    • [Commons][Linkis-1026] Optimize the display of numeric type fields exported to Excel
    • [Commons][Linkis-1036] Optimize the file permissions of the shared file system in LocalFileSystem mode
    • [Commons][Linkis-1185] Add some scala code specification checking rules to automatically detect scala code format
    • [Orchestrator][Linkis-1183] Optimize the code with high cyclomatic complexity in the Orchestrator module and the Map problem under high concurrency
    • [MGS-LinkisServiceGateway][Linkis-1064] Support the whitelist configuration of http api, which can be called without user login authentication
    • [CGS-EngineConnManager][Linkis-1030] Support for transferring custom environment variables from ECM to EC
    • [CGS-EngineConnPlugin] [Linkis-1083] Unify and optimize the engineConnPlugin exception class
    • [CGS-EngineConnPlugin][Linkis-1203] Optimize tag update/delete logic
    • [CGS-EngineConnPlugin-JDBC] [Linkis-1117] Support kerberos authentication type for linkis jdbc
    • [CGS-EngineConnPlugin-Flink] [Linkis-1070] Optimize flink EngineConn in prod mode for jar application submitting and optimize the kill operation of Flink computation executor
    • [CGS-EngineConnPlugin-Flink] [Linkis-1248] Enhance FlinkOnceJob to support executing the set and show grammar of Flink SQL
    • [CGS-EngineConnManager][Linkis-1167] Add JAVA_HOME for ProcessEngineConnLaunch
    • [CGS-ComputationClient][Linkis-1126] Support python matplotlib to display images
    • [CGS-Entrance][Linkis-1206] Optimize the logic of Entrance and add taskID to distinguish tasks
    • [CGS-LinkisManager][Linkis-1209] Optimize multiple functions commonly used by manager: add update and startup time attributes to node objects /yarn resource acquisition method
    • [CGS-LinkisManager][Linkis-1213] Optimize the relationship between long-lived tags and nodes
    • [CGS-LinkisManager][Linkis-1222] The response result of the request to support ECM registration is returned
    • [PES-PublicService][Linkis-1211] Optimize the database information update logic of jobhistory, remove the transaction, and add retry logic
    • [PES-Metadata][Linkis-1224] Remove the association restriction between datasource/dbs http interface query results and logged-in users through parameter configuration

    Bugs Fix#

    • [DB][Linkis-1053] Fix the problem of data insertion failure caused by too long database table fields
    • [DB][Linkis-1087] Remove duplicate DDL statements
    • [Commons][Linkis-1058] Fix the problem that the material package with subdirectories could not be compressed when uploading
    • [Commons][Linkis-1223] Upgrade log4j version to 2.17.0
    • [Commons][Linkis-1052] Fixed not getting route instance when hostname starts with application name
    • [CGS-LinkisManager][Linkis-1014] Fix the wrong usage of object equality judgment
    • [CGS-LinkisManager][Linkis-1054] Fix instance label parsing failure when hostname contains service name.
    • [CGS-LinkisManager][Linkis-1074] Fix NPE issue with http api 'rm/userresources'
    • [CGS-LinkisManager][Linkis-1101] Fixed the issue that the monitor failed to send the engine heartbeat RPC request, causing the engine to be killed by mistake
    • [CGS-LinkisManager][Linkis-1210] Fix instance check and engine tag exclusion bug
    • [CGS-LinkisManager][Linkis-1214] Fix multiple Bugs with high concurrency in RM
    • [CGS-LinkisManager][Linkis-1216] Remove node monitor module from AM
    • [MGS-LinkisServiceGateway][Linkis-1093] Fix permission bypass when the value of pass auth uri is empty
    • [MGS-LinkisServiceGateway][Linkis-1105] Fix linkis default test account weak password problem
    • [MGS-LinkisServiceGateway][Linkis-1234] Fix memory leak problem of SSO login
    • [CGS-Common][Linkis-1199] Fix SqlCodeParser to escape the separator ";" into multiple SQL
    • [CGS-Entrance][Linkis-1073] Fix the exception in http api 'entrance/{id}/killJobs' caused by the unused parameter {id}
    • [CGS-Entrance][Linkis-1106] VarSubstitutionInterceptor get code type bug fix
    • [CGS-Entrance][Linkis-1149] Fix the problem that the foreground cannot get the progress information after the job task is completed
    • [CGS-Entrance][Linkis-1205] Fixed LogWirter's oom bug
    • [CGS-EngineConnPlugin][Linkis-1113] Fix sql statement error when bml resource data record is updated
    • [CGS-EngineConnPlugin-JDBC] [Linkis-923] Fix the bug of connection url without JDBC engine
    • [CGS-EngineConnPlugin-Spark][Linkis-1017] Fix spark3 engine compilation error
    • [CGS-EngineConnPlugin-Flink][Linkis-1069] Fix the ClassNotfoundException problem caused by the lack of dependencies such as hadoop-mapreduce-client-core in the Flink engine
    • [CGS-EngineConnPlugin-Flink][Linkis-1128] Fix the problem of inaccurate table data insertion in the flink engine
    • [CGS-EngineConnPlugin-Flink][Linkis-1304] Fix the bug that Flink SQL could not support multiple SQL statements, and that the checkpoint became invalid when using the set, reset or drop grammar
    • [CGS-EngineConnPlugins-Hive][Linkis-1231] Fix the progress bug of the engine pushing multiple sub-jobs
    • [PEC-BmlServer][Linkis-1155] Fix the problem of using mysql reserved words in sql statements
    • [PEC-CSServer][Linkis-1160] Fix NPE in CsJobListener
    • [Orchestrator][Linkis-1179] Fix the bug caused by orchestrator concurrency
    • [Orchestrator][Linkis-1186] Fix the problem that the tasks queued by Orchestrator cannot be killed
    • [Console][Linkis-1121] Get the protocol from the current request, remove the hard code for 'http'

    Other#

    • [Commons&MGS-LinkisServiceGateway][Linkis-1192] The third-party reliance on mysql-connector-java violates the Apache License Policy. Therefore, the dependency on mysql-connector-java has been removed from 1.0.3. If you only use it for your own project, you can add mysql-connector-java dependency to your project.
    • [Commons&MGS-LinkisEureka][Linkis-1291] Exclude jar packages with unclear license attribution category io.github.x-stream:mxparser
    • [Commons][Linkis-1287] Split binary distribution package and source code LICENSE/NOTICE and other files
    • [Console][Linkis-1301] Remove font files with unknown license authorization and front-end resource files such as unused image icons
    • [CGS-EngineConnPlugins-Python][Linkis-1281] Remove the pyspark.zip in the source code and add the LICENSE.txt of py4j document

    Credits#

    The release of Apache Linkis(incubating) 1.0.3 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors!

    - + \ No newline at end of file diff --git a/download/release-notes-1.1.0/index.html b/download/release-notes-1.1.0/index.html index 42a3b9afb82..3df3dcb2c10 100644 --- a/download/release-notes-1.1.0/index.html +++ b/download/release-notes-1.1.0/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.0 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.1.0

    Apache Linkis(incubating) 1.1.0 includes all of Project Linkis-1.1.0.

    This release mainly adds datasource and metadata source management services, supports metadata information query for hive/mysql/kafka/elasticsearch, and fixes bugs in version 1.0.3 reported by the community.

    The following key features have been added:

    • Provides Restful interface to add, delete, check, and modify data sources, as well as data source connection tests.
    • Provides Restful interface for database, table, partition, column attribute query for metadata.
    • Provides Java clients for data source and metadata service management.

    Abbreviations:

    • CGS: Computation Governance Services
    • PES: Public Enhancement Services
    • MGS: Microservice Governance Services
    • EC: Engineconn
    • ECM: EngineConnManager
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service

    New Feature#

    • [DMS-Common][Linkis-1335] Add a new module linkis-datasource-manager-common, add datasource data structure/exception class/util class.
    • [DMS-Common][Linkis-1340] Add a new module linkis-metadata-manager-common, add metadata data structure/exception class/util class.
    • [MDS-Server][Linkis-1352] Add a new module linkis-datasource-manager-server to provide data source management services, provides functions such as adding, deleting, checking, and modifying data sources through the restful interface.
    • [MDS-Server][Linkis-1356] Add a new module linkis-metadata-manager-server to provide metadata management services, which provides database, table, and column queries of metadata through the restful interface.
    • [MDS-Services][Linkis-1366] Add a new module linkis-metadata-manager-service-es to provide elasticsearch metadata management service.
    • [MDS-Services][Linkis-1368] Add a new module linkis-metadata-manager-service-hive, providing for hive Metadata management service.
    • [MDS-Services][Linkis-1371] Add a new module linkis-metadata-manager-service-kafka, providing for kafka Metadata management service.
    • [MDS-Services][Linkis-1373] Add a new module linkis-metadata-manager-service-mysql, provide for mysql Metadata management service.
    • [DMS&MDS-Client][Linkis-1418] [[Linkis-1434]](https://github.com/apache /incubator-linkis/pull/1434)[Linkis-1438][[Linkis-1441]](https://github.com /apache/incubator-linkis/pull/1441) Add a new data source management Java client module linkis-datasource-client to facilitate data source management through sdk.
    • [DMS&MDS-Web][Linkis-1456] [[Linkis-1510] Added data source front-end management page, through which you can Simple creation and testing of the data source.

    Enhancement#

    • [MGS-LinkisServiceGateway][Linkis-1377] Introduce the Skywalking component to provide basic capabilities of distributed trace and troubleshooting
    • [CGS-EngineConnPlugin][Linkis-1408] Adjust the default maximum idle time of engine resources to 0.5h, to reduce the waiting time caused by resource competition in multi-user scenarios
    • [CGS-EngineConnPlugin][Linkis-1535] set JAVA_ENGINE_REQUEST_INSTANCE to constant 1
    • [DB][Linkis-1554] Add DataSource DDL and DML SQL
    • [MDS][Linkis-1583] Add functionality to get attributes of partitions in Hive datasources and fix connection issues
    • [MGS-LinkisServiceGateway][Linkis-1636] use regular expression to match gateway URL, if it matches, it will pass normally
    • [Commons][Linkis-1397] Add maven wrapper to support compiling and packaging using mvnw script
    • [EC][Linkis-1425] Unify ec's log configuration file as log4j2.xml
    • [Install-Script][Linkis-1563] Optimize the linkis-cli client script and remove the redundant linkis-cli-start script file
    • [Install-Script][Linkis-1559] Optimize the installation and deployment script: add a database connection test when installing and deploying; before initialization, print the database information so that the operator can confirm it again
    • [Install-Script][Linkis-1559] Add necessary deployment log information and color identification of key information, such as execution steps, directory-creation logs, etc.
    • [Install-Script][Linkis-1559] Add basic environment checks for spark/hadoop/hive
    • [Install-Script][Linkis-1559] Migrate the hive metabase HIVE_META information configuration from linkis-env.sh to db.sh
    • [Commons][Linkis-1557] Spring-boot/Spring-cloud version control uses the official dependency manager's pom file method, avoiding the introduction of too many version configurations
    • [Commons][Linkis-1621] Spring upgrade, Spring-boot upgrade to 2.3.12.RELEASE, Spring-cloud upgrade to Hoxton.SR12
    • [Commons][Linkis-1558] Unit test JUnit 4 migration upgrade to JUnit 5
    • [Commons&MGS-Eureka][Linkis-1313] Remove unnecessary third-party dependencies and reduce packaged materials to a certain extent package size
    • [Commons&MGS-LinkisServiceGateway][Linkis-1660] Use spring-boot-starter-jetty to replace the direct introduction of jetty dependencies to avoid jetty version conflict

    Bugs Fix#

    • [Deployment][Linkis-1390] Fix insufficient permissions, after switching users, on the directory created during installation and deployment for storing Job result set files (wds.linkis.resultSet.store.path)
    • [Commons][Linkis-1469] Fix the problem that SQL cannot be cut correctly when the ';' character is included in the sql script
    • [CGS-EngineConnPlugin-JDBC][Linkis-1529] Fix the abnormal problem of NullPointerException in JDBC engine authentication type parameter
    • [CGS-Entrance][Linkis-1540] Fix the long-typed parameter of the "kill" method in linkis-entrance, which caused null values to be unrecognized
    • [Commons][Linkis-1600] Fix the error when a result set is downloaded as Excel, caused by a lower version of commons-compress
    • [CGS-Client][Linkis-1603] Fix the problem that the client does not support the -runtimeMap parameter
    • [CGS-EngineConnPlugin-JDBC][Linkis-1610] Fix jdbc engine cannot support "show databases;" statement problem for postgresql
    • [Commons][Linkis-1618] Fix http response return result in xml format instead of json format
    • [CGS-EngineConnPlugin-JDBC][Linkis-1646] Fix the problem that when the JDBC engine queries complex-type fields, the value is displayed as an object address
    • [CGS-EngineConnPlugin-PYTHON][Linkis-1731] Fix the problem of row inversion of the result set field of the python engine's showDF function
    • [PES-BML][Linkis-1556] Fix the HttpMessageNotWritableException that may occur in the file download interface

    Credits#

    The release of Apache Linkis(incubating) 1.1.0 is inseparable from the contributors of the Linkis community. Thanks to all the community contributors, including but not limited to the following contributors for this version: Alexkun, CCweixiao, Celebrate-future, Davidhua1996, FireFoxAhri, WenxiangFan , Zosimer, aleneZeng, casionone, dddyszy, det101, ganlangjie, huapan123456, huiyuanjjjjuice, husofskyzy, iture123, jianwei2, legendtkl, peacewong, pjfanning, silent-carbon, xiaojie19852006

    - + \ No newline at end of file diff --git a/download/release-notes-1.1.1/index.html b/download/release-notes-1.1.1/index.html index 960fe8219c4..658aa426c46 100644 --- a/download/release-notes-1.1.1/index.html +++ b/download/release-notes-1.1.1/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.1 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.1.1

    Apache Linkis(incubating) 1.1.1 includes all of Project Linkis-1.1.1.

    This release mainly supports the functions of UDF multi-version control and UDF storage to BML; submitted tasks support the collection and viewing of Yarn queue resource usage statistics; support for the data virtualization engine OpenLooKeng is added; and known bugs reported by the community are fixed.

    The following key features have been added:

    • Support proxy user mode: user A can perform tasks on behalf of user B; one proxy user can proxy multiple users
    • Support UDF multi-version control and UDF storage to BML features
    • Submitted tasks support the collection of Yarn queue resource usage statistics and their visual display on the management console page
    • Added support for data virtualization engine OpenLooKeng

    Abbreviations:

    • EC: Engineconn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager

    New Feature#

    • [Gateway&Entrance][Linkis-1608] Support proxy user mode, user A can perform tasks on behalf of user B, query user B's Related data, a proxy user can proxy multiple users
    • [LM-ResourceManager][Linkis-1616] The resource address configuration of YARN ResourceManager supports high-availability multi-address configuration, the current YARN ResourceManager conversion When the status or stop, the master node will be resolved from the high-availability address list to continue to provide services
    • [EC-OpenLooKeng][Linkis-1639] Added support for data virtualization engine OpenLooKeng
    • [UDF][Linkis-1534] Support UDF multi-version control and UDF storage to BML, submit tasks support Yarn queue resources Use statistics collection and management console page visualization
    • [Client][Linkis-1718] The Linkis-cli client supports submitting Once type tasks, which will only run once after the engine process is started Task, automatically destroyed after the task is over
    • [ECP][Linkis-1758] Add engine material refresh interface, support to refresh engine material resources through http interface call

    Enhancement#

    • [Gateway][Linkis-1430] For the Token authentication method, the Token acquisition is adjusted from the configuration file to the database table
    • [Entrance][Linkis-1642] Optimize the excel export interface resultsetToExcel: support passing the number of rows of downloaded data
    • [Entrance][Linkis-1733] Add support for more default time variables related to run_date
    • [Entrance][Linkis-1794] Add a limit on the data size of a single row in the result set, and mitigate OOM problems caused by large result sets
    • [DMS-Common][Linkis-1757] Support configuring a Hive metadata administrator; administrators can obtain the metadata information of all of Hive's databases and tables through the interface
    • [Common][Linkis-1799] Optimize the segmentation of service logs: adjust the log history segmentation time from one day to one hour
    • [Common][Linkis-1921] Optimize Jackson's dependency management: manage jackson dependencies uniformly through jackson-bom, and upgrade to Version 2.11.4
    • [ECM][Linkis-1779] Optimize the status monitoring logic of ECM instances and add a judgment on heartbeat reporting time, fixing misjudgments possibly caused by Eureka performance issues
    • [ECM][Linkis-1930] Optimize the resource check so that ECM resources are no longer checked
    • [Web][Linkis-1596] Optimize the management console's task log viewing interface, fixing the problem that logs of running jobs could not be refreshed and displayed in time
    • [Web][Linkis-1650] Linkis console global history page: support filtering historical task data by creator

    Bugs Fix#

    • [Entrance][Linkis-1623] Fix LogPath and ResultSetPath incorrectly use submitUser as executeUser
    • [Entrance][Linkis-1640] Fix the problem that LogReader used a singleton InputStream, causing log loss and the inability to read the latest persisted logs
    • [Entrance][Linkis-2009] Fix the problem of memory leak caused by not closing thread resources in Entrance service
    • [Entrance][Linkis-1901] Replace the cache in EntranceFactory with Guava Cache, fixing the problem that a user's modified concurrency parameter could not take effect
    • [Entrance][Linkis-1986] Fix an abnormal line count when fetching Entrance real-time logs, which resulted in duplicated logs
    • [ECM][Linkis-1714] Reduce the EC's default Java memory size and add a retry log for EC applications, mitigating the "Cannot allocate memory" exception
    • [ECM][Linkis-1806] Optimize the EC life-cycle handling logic: when an EC started by ECM times out due to insufficient queue resources and its status is Failed, kill the EC process
    • [Common][Linkis-1721] Fixed the issue that hdfsFileSystem was not refreshed when Kerberos authentication failed
    • [UDF][Linkis-1728] Optimize the /api/rest_j/v1/udf/all API interface to fix occasionally time-consuming queries
    • [Config][Linkis-1859] Fix the problem of abnormal primary key duplication in the console parameter configuration saveFullTree interface
    • [Client][Linkis-1739] Fix a parameter spelling error in the ujes-client request that caused parameter transmission to fail
    • [Client][Linkis-1783] Fix the problem that the default configuration of the task creator creator parameter does not take effect
    • [Client][Linkis-1821] Fix missing parameters in the ujes-client request entity class GetTableStatisticInfoAction
    • [EC][Linkis-1765] Fix the blocking problem that EC triggers tryShutdown when the task is running
    • [LM-AppManager][Linkis-1814] Fix the response information returned by the createEngineConn interface of EngineRestfulApi is incorrect, resulting in NPE in client calls The problem.
    • [Web][Linkis-1972] Remove the dss related interface code left but not used by the management console for historical reasons
    • [EC-Spark][Linkis-1729] Add SparkPreExecutionHook function, compatible with the old package name before Linkis (com.webank.wedatasphere .linkis)
    • [EC-JDBC][Linkis-1851] Fix the jdbc engine, the problem that there are multiple sql statements in one task execution cannot be executed normally
    • [EC-JDBC][Linkis-1961] Fix the problem that the log cannot be printed normally due to the SLF4J dependency problem when the jdbc engine starts
    • [Gateway][Linkis-1898] Fix the problem that the initial domain name cannot be set when the GatewaySSOUtils user successfully logs in to generate a cookie

    Others#

    • [License][Linkis-2110] Remove the binary file .mvn/wrapper/maven-wrapper.jar from the source code and adjust the LICENSE content related to .mvn/*
    • [License][Linkis-2113] Upgrade py4j-0.10.7-src.zip to py4j-0.10.9.5-src.zip; update the license files of py4j-*.src and adjust their location, moving from linkis-engineconn-plugins/engineconn-plugins/python/src/main/py4j/LICENSE-py4j-0.10.7-src.txt to licenses/LICENSE-py4j-0.10.9.5-src.txt for easy viewing
    • Fix the shell script mvnw in the release source code using Windows-style (CRLF) line endings

    Credits#

    The release of Apache Linkis (incubating) 1.1.1 would not have been possible without the contributors of the Linkis community. Thanks to all community contributors, including but not limited to: AbnerHung, Alexkun, barry8023, CCweixiao, Davidhua1996, Fuu3214, Liveipool, casinoone, demonray, husofskyzy, jackxu2011, legendtkl, lizheng920625, maidangdang44, peacewong, seedscoder

    - + \ No newline at end of file diff --git a/download/release-notes-1.1.2/index.html b/download/release-notes-1.1.2/index.html index 5489e3978e9..a69edca8fe8 100644 --- a/download/release-notes-1.1.2/index.html +++ b/download/release-notes-1.1.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.2 | Apache Linkis - + @@ -15,7 +15,7 @@

    Release Notes 1.1.2

    Apache Linkis (incubating) 1.1.2 includes all of Project Linkis-1.1.2.

    This release mainly supports simplified deployment in an environment without HDFS (covering some engines), making lightweight learning, use, and debugging more convenient; adds support for the Sqoop data migration engine; optimizes exception handling and logging; upgrades some components with security vulnerabilities; and fixes known bugs reported by the community.

    The main functions are as follows:

    • Support simplified deployment in an environment without HDFS (covering some engines), convenient for more lightweight learning, use, and debugging
    • Add support for the Sqoop data migration engine
    • Optimize logging to improve troubleshooting efficiency
    • Fix interface security issues such as unauthorized user access
    • Upgrade some dependencies and fix bugs known to the community

    Abbreviations:

    • COMMON: Linkis Common
    • EC: EngineConn
    • ECM: EngineConnManager
    • ECP: EngineConnPlugin
    • DMS: Data Source Manager Service
    • MDS: MetaData Manager Service
    • LM: Linkis Manager
    • PS: Linkis Public Service
    • PE: Linkis Public Enhancement
    • RPC: Linkis Common RPC
    • CG: Linkis Computation Governance

    New Feature#

    • [Deployment][Linkis-1804,1811,1841,1843,1846,1933] Support simplified deployment in an environment without HDFS (covering some engines), convenient for more lightweight learning, use, and debugging
    • [PS][Linkis-1949] Add a list interface (/listundone) for unfinished jobs and optimize query performance through scheduled polling (see the sketch after this list)
    • [BML][Linkis-1811,1843] The BML material service adds support for deployment with local file system storage
    • [Common][Linkis-1887] The RPC module Sender supports modifying load balancing parameters such as Ribbon's
    • [Common][Linkis-2059] Use the task ID as the trace ID in logs
    • [EC][Linkis-1971] EC AsyncExecutor supports setting the number of parallel Job Groups
    • [Engine][Linkis-2109] Add support for the Sqoop data migration engine
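
    For the /listundone interface added in Linkis-1949, a minimal sketch of a call is below. The gateway host, port, session cookie name, and query parameters are assumptions based on the usual Linkis REST conventions; verify the exact path and parameters against your deployment's API docs.

    ```shell
    # Hedged sketch: query unfinished jobs via the new /listundone interface.
    # GATEWAY_HOST and TICKET are illustrative placeholders; the cookie name and
    # /api/rest_j/v1/jobhistory prefix should be verified for your version.
    curl -s --cookie "linkis_user_session_ticket_id_v1=${TICKET}" \
      "http://${GATEWAY_HOST}:9001/api/rest_j/v1/jobhistory/listundone?pageNow=1&pageSize=20"
    ```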

    Enhancement#

    • [ECP][Linkis-2074] The Flink engine supports custom configuration
    • [Deployment][Linkis-1841] Allow users to disable Spark/Hive/HDFS environment detection during deployment
    • [Deployment][Linkis-1971] Fix incorrect automatic IP detection when deploying on multi-NIC machines
    • [Entrance][Linkis-1941] Entrance supports passing the raw jobId to EngineConn and LinkisManager
    • [Entrance][Linkis-2045] Refactor the mapping between script type and run type in the EntranceInterceptor implementation classes
    • [RPC][Linkis-1903] Modify the exception handling logic of the RPC module to pass through the original error message of EngineConnPlugin exceptions
    • [RPC][Linkis-1905] Add support for passing LoadBalancer parameters, such as Ribbon's
    • [Orchestrator][Linkis-1937] The orchestrator task scheduler's creator configuration parameter supports multiple creator values
    • [PE][Linkis-1959] ContextService adds necessary log printing to facilitate troubleshooting
    • [EC][Linkis-1942] EC supports injecting the taskID into the underlying engine's configuration, making it easy to associate task lineage analysis with a specific Linkis task
    • [EC][Linkis-1973] Change how a task's execution error log is fetched from cat to tail -1000, bounding the amount of log read and avoiding loading large files in full (see the sketch after this list)
    • [CG,PE][Linkis-2014] Add configuration add/get/delete operations and optimize the synchronization lock
    • [Common][Linkis-2016] Replace the standalone cglib dependency with Spring's built-in cglib
    • [Gateway][Linkis-2071] Add a GatewayURL attribute to the HTTP request header
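
    The cat-to-tail change in Linkis-1973 is simply about bounding how much of the engine's error log is read. A sketch of the difference, with an illustrative log path variable:

    ```shell
    # Old behaviour: load the entire error log, which can be very large.
    # cat "${EC_LOG_DIR}/stderr"
    # New behaviour (per Linkis-1973): read only the last 1000 lines.
    # EC_LOG_DIR is an illustrative placeholder for the engine's log directory.
    tail -n 1000 "${EC_LOG_DIR}/stderr"
    ```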

    Bugs Fix#

    • [Engine][Linkis-1931] Fix a Python loading error that belongs to PySpark rather than to standalone Python itself
    • [Deployment][Linkis-1853] Fix a DDL error during installation initialization
    • [UDF][Linkis-1893] Add user permission checks for UDF-related interfaces
    • [EC][Linkis-1933] Grant resultSet write permission to job-executing users who are not in the deploy user group
    • [EC][Linkis-1846] Fix the ResultSet local path configuration not taking effect
    • [EC][Linkis-1966] Replace System.env with System.properties
    • [EC-Python][Linkis-2131] Fix a Python engine exception caused by pandas
    • [PS][Linkis-1840] Add flexible options when downloading data in CSV format to prevent garbled data formatting
    • [Orchestrator][Linkis-1992] Fix a concurrency issue in the Orchestrator Reheater module
    • [PE][Linkis-2032] Optimize the configuration interface: when obtaining a Label's configuration parameters, fetch the key-value pair directly
    • [Web][Linkis-2036] Fix an instance display problem on the console's ECM page
    • [Web][Linkis-1895] Fix a display bug on the resource page
    • [ECP][Linkis-2027] Fix an error caused by byte truncation during ECP material download
    • [ECP][Linkis-2088] Fix progress rollback while a Hive task is running
    • [ECP][Linkis-2090] Fix the problem that Python3 could not be found
    • [CG][Linkis-1751] Fix script custom variable run type and suffix constraint configuration
    • [CG][Linkis-2034] Fix mismatched descriptions of timed-out tasks
    • [CG][Linkis-2100] Mitigate a DB deadlock under high concurrency

    Security related#

    • [UDF][Linkis-1893] Fix unauthorized access issues in some UDF interfaces (/udf/list, /udf/tree/add, /udf/tree/update)
    • [PS][Linkis-1869] Fix unauthorized access issues in Linkis PublicService related interfaces
    • [PS][Linkis-2086] Add a permission check to the /updateCategoryInfo method

    Dependency changes#

    • [MDS][Linkis-1947] mysql-connector-java upgraded from 5.1.34 to 8.0.16
    • [ECP][Linkis-1951] hive-jdbc upgraded from 1.2.1 to 2.3.3
    • [ECP][Linkis-1968] Upgrade protobuf-java to 3.15.8
    • [ECP][Linkis-2021] Remove some redundant dependencies from the Flink module
    • [RPC][Linkis-2018] Unify the json4s version
    • [Web][Linkis-2336] Introduce the web component jsencrypt-3.2.1 for login password encryption and decryption

    Thanks#

    The release of Apache Linkis (incubating) 1.1.2 would not have been possible without the contributors of the Linkis community. Thanks to all community contributors, including but not limited to (in no particular order): Alexyang, Casion, David hua, GodfreyGuo, Jack Xu, Zosimer, allenlliu, casionone, ericlu, huapan123456, husofskyzy, iture123, legendtkl, luxl@chinatelecom.cn, maidangdang44, peacewong, pengfeiwei, seedscoder, weixiao, xiaojie19852006, めぐみん, Li Wei

    - + \ No newline at end of file diff --git a/faq/main/index.html b/faq/main/index.html index a2218294f36..5df1f65059a 100644 --- a/faq/main/index.html +++ b/faq/main/index.html @@ -7,7 +7,7 @@ Q&A | Apache Linkis - + @@ -16,7 +16,7 @@

    Solution: on a CDH 6.3.2 cluster, the Spark engine classpath only contains /opt/cloudera/parcels/CDH-6.3.2-1.cdh6.3.2.p0.1605554/lib/spark/jars; add hive-exec-2.1.1-cdh6.1.0.jar to it, then restart Spark.
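
    A sketch of that fix as shell commands; the source path of the hive-exec jar is an assumption, so adjust it to wherever the jar actually lives on your node:

    ```shell
    # Hedged sketch: put the missing hive-exec jar on the CDH Spark classpath.
    # The source path below is illustrative; locate hive-exec-2.1.1-cdh6.1.0.jar first.
    cp /opt/cloudera/parcels/CDH/lib/hive/lib/hive-exec-2.1.1-cdh6.1.0.jar \
       /opt/cloudera/parcels/CDH-6.3.2-1.cdh6.3.2.p0.1605554/lib/spark/jars/
    # Then restart Spark via Cloudera Manager or your usual service tooling.
    ```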

    Q17. When the Spark engine starts, it reports "queue default is not exists in YARN"; the details are as follows:#

    linkis-exception-09.png

    Solution: when the 1.0 linkis-resource-manager-dev-1.0.0.jar pulls queue information, there is a compatibility problem in parsing the JSON. The Linkis team has provided an optimized replacement package; the jar path is /appcom/Install/dss-linkis/linkis/lib/linkis-computation-governance/linkis-cg-linkismanager/.

    Q18. When the Spark engine starts, it reports "get the Yarn queue information exception" together with an abnormal HTTP link#

    Solution: the YARN address configuration has been migrated to the database, so the following configuration needs to be added:

    db-config-02.png
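
    The screenshot above shows the DB configuration. As a hedged sketch of the equivalent insert, assuming the external-resource-provider table and columns from the Linkis 1.x DDL (verify the table name, columns, and config JSON keys against your schema before running):

    ```shell
    # Hedged sketch -- table/column/JSON-key names are assumptions from the
    # Linkis 1.x DDL; verify against your deployment before running.
    mysql -u linkis -p linkis <<'SQL'
    INSERT INTO linkis_cg_rm_external_resource_provider
      (resource_type, name, labels, config)
    VALUES
      ('Yarn', 'default', NULL,
       '{"rmWebAddress":"http://yarn-rm-host:8088","hadoopVersion":"2.7.2","authorEnable":false,"user":"hadoop","pwd":"pwd"}');
    SQL
    ```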

    Q19. The Spark engine executes successfully the first time it is scheduled, but running it again reports "Spark application sc has already stopped, please restart it". The specific errors are as follows:#

    page-show-03.png

    Solution: the background is that the Linkis 1.0 engine architecture was adjusted: after a Spark session is created, it is reused to avoid overhead and improve execution efficiency. If a spark.scala script contains spark.stop(), that call closes the newly created (shared) session, so the next execution is told the session is closed and asked to restart it. Fix: first remove stop() from all scripts, then execute in order: default.sql first, followed by scalaspark and pythonspark.
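
    To make the failure mode concrete, here is a hedged sketch of the anti-pattern via linkis-cli (the Spark engine version label is illustrative). The first run succeeds, but the trailing spark.stop() closes the shared session, so the next submission on this engine fails with the error above:

    ```shell
    # Hedged sketch of the anti-pattern: spark.stop() closes the shared
    # SparkSession, so the *next* job on this engine fails with
    # "Spark application sc has already stopped, please restart it".
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType scala \
      -code 'val df = spark.sql("select 1"); df.show(); spark.stop()' \
      -submitUser hadoop -proxyUser hadoop
    ```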

    Q20. PySpark scheduled execution reports "initialize python executor failed: ClassNotFoundException org.slf4j.impl.StaticLoggerBinder", as follows:#

    linkis-exception-10.png

    Solution: the Spark server lacks slf4j-log4j12-1.7.25.jar; copy that jar to /opt/cloudera/parcels/CDH-6.3.2-1.cdh6.3.2.p0.1605554/lib/spark/jars.

    Q21. PySpark scheduled execution reports "initialize python executor failed, submit-version error", as follows:#

    shell-error-03.png

    Solution: the Linkis 1.0 PySpark engine has a bug in the code that obtains the Spark version. The fix is as follows:

    code-fix-01.png

    Q22. When PySpark is scheduled to execute, it reports "TypeError: an integer is required (got type bytes)" (also reproducible by running the engine start-up command separately); the details are as follows:#

    shell-error-04.png

    Solution: the system Spark and Python versions are incompatible: Python is 3.8 while Spark is 2.4.0-cdh6.3.2, and Spark requires Python <= 3.6. Downgrade Python to 3.6, and comment out the following lines of /opt/cloudera/parcels/CDH/lib/spark/python/lib/pyspark.zip/pyspark/context.py:

    shell-error-05.png

    Q23. The Spark engine is 2.4.0+cdh6.3.2. The Python engine previously lacked pandas and matplotlib, so local Python was upgraded to 3.8, but Spark does not support Python 3.8 (only 3.6 and below)#

    Solution: reinstall the Anaconda2 package manager, downgrade Python to 2.7, and install the pandas and matplotlib modules; the Python and Spark engines can then be scheduled normally.
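
    A sketch of the environment fix with conda; this shows the Python 3.6 route from Q22 (the env name is illustrative), while the Q23 route instead uses Anaconda2 with Python 2.7:

    ```shell
    # Hedged sketch: create a Spark-compatible Python environment (<= 3.6 for
    # Spark 2.4.x) with the modules the Python engine needs.
    conda create -n linkis-py36 python=3.6 pandas matplotlib -y
    conda activate linkis-py36
    ```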

    - + \ No newline at end of file diff --git a/home/index.html b/home/index.html index b0ec75ca252..10714a1c29f 100644 --- a/home/index.html +++ b/home/index.html @@ -7,14 +7,14 @@ - +
    -

    Computation Middleware

    Before

    Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way +

    Computation Middleware

    Before

    Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way

    before

    Description

    Standardized Interfaces

    Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.

    description

    Computation Governance

    Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kinds of engines like OLAP, OLTP (developing), Streaming, and handle all these "computation governance" affairs in a standardized reusable way.

    Core Features

    Connectivity

    Simplify the operation environment; decouple the upper and lower layers so that the upper layer is insensitive to changes in the bottom layers

    Scalability

    Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine

    Controllability

    Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities

    Orchestration

    Active-active and mixed computing strategy design based on the Orchestrator Service

    Reusability

    Greatly reduces the back-end development workload of upper-level application development; swiftly and efficiently build a data platform tool suite based on Linkis

    - + \ No newline at end of file diff --git a/index.html b/index.html index a1d90183c8a..074e9086f21 100644 --- a/index.html +++ b/index.html @@ -7,16 +7,16 @@ Apache Linkis | Apache Linkis - +
    -

    Computation Middleware

    Before

    Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way +

    Computation Middleware

    Before

    Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way

    before

    Description

    Standardized Interfaces

    Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.

    description

    Computation Governance

    Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kinds of engines like OLAP, OLTP (developing), Streaming, and handle all these "computation governance" affairs in a standardized reusable way.

    Core Features

    Connectivity

    Simplify the operation environment; decouple the upper and lower layers so that the upper layer is insensitive to changes in the bottom layers

    Scalability

    Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine

    Controllability

    Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities

    Orchestration

    Active-active and mixed computing strategy design based on the Orchestrator Service

    Reusability

    Greatly reduces the back-end development workload of upper-level application development; swiftly and efficiently build a data platform tool suite based on Linkis

    - + \ No newline at end of file diff --git a/search/index.html b/search/index.html index 644d8c78d9d..001cf882854 100644 --- a/search/index.html +++ b/search/index.html @@ -7,7 +7,7 @@ Search the documentation | Apache Linkis - + @@ -15,7 +15,7 @@

    Search the documentation

    - + \ No newline at end of file diff --git a/team/index.html b/team/index.html index a215ca5b2c9..f1d456ba76f 100644 --- a/team/index.html +++ b/team/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -15,7 +15,7 @@

    Contributing


    You can participate in contributing to Apache Linkis by reporting bugs, submitting new features or improvement suggestions, submitting patches, writing or refining documents, taking part in community Q&A, organizing community activities, etc. For detailed instructions, please refer to the Contributor's Guide.


    PPMC

    (In no particular order)

    Committer

    (Sorted by English initials)

    Contributors of Apache Linkis

    Contributors of Apache Linkis WebSite

    - + \ No newline at end of file diff --git a/user/index.html b/user/index.html index 45163363744..fe25c18b93a 100644 --- a/user/index.html +++ b/user/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -15,7 +15,7 @@

    Our Users

    This project is used by the following companies
    Are you using this project? You can add your company
    - + \ No newline at end of file diff --git a/versions/index.html b/versions/index.html index 213aa52730b..92d9ec63bdd 100644 --- a/versions/index.html +++ b/versions/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -15,7 +15,7 @@

    Apache Linkis All Document Versions


    This is the current document version

    Here you can find the currently published version of the document

    1.1.2: Document | Release Note | Source Code

    This is an unpublished document version

    Here you can find the unpublished version of the document

    Next-1.1.3 (WIP): Document

    This is the previously published version of the document

    1.1.1: Document | Source Code
    1.1.0: Document | Source Code
    1.0.3: Document | Source Code
    1.0.2: Document | Source Code
    0.11.0: Document | Source Code
    - + \ No newline at end of file diff --git a/zh-CN/404.html b/zh-CN/404.html index fd055fddc4b..67a0168b1a6 100644 --- a/zh-CN/404.html +++ b/zh-CN/404.html @@ -7,7 +7,7 @@ Page Not Found | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/Images/EngineUsage/python-configure.png b/zh-CN/Images/EngineUsage/python-configure.png new file mode 100644 index 00000000000..5a92d168c39 Binary files /dev/null and b/zh-CN/Images/EngineUsage/python-configure.png differ diff --git a/zh-CN/assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png b/zh-CN/assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png new file mode 100644 index 00000000000..5a92d168c39 Binary files /dev/null and b/zh-CN/assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png differ diff --git a/zh-CN/assets/js/0252f584.48cbe33f.js b/zh-CN/assets/js/0252f584.48cbe33f.js deleted file mode 100644 index 6ac7c6b7f1b..00000000000 --- a/zh-CN/assets/js/0252f584.48cbe33f.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[11119],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var r=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,r)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var p=r.createContext({}),u=function(n){var e=r.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return r.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return r.createElement(r.Fragment,{},e)}},h=r.forwardRef((function(n,e){var t=n.components,i=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?r.createElement(d,a(a({ref:e},c),{},{components:t})):r.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:i,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.1/user_guide/linkiscli_manual"},"Linkis CLI 
Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168045185-f25c61b6-8727-434e-8150-e13cc4a04ade.png",alt:"python"})," "),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/0252f584.7ca2fa86.js b/zh-CN/assets/js/0252f584.7ca2fa86.js new file mode 100644 index 00000000000..ed747bf4c5e --- /dev/null +++ b/zh-CN/assets/js/0252f584.7ca2fa86.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[11119],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var r=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,r)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var p=r.createContext({}),u=function(n){var e=r.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return r.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return r.createElement(r.Fragment,{},e)}},h=r.forwardRef((function(n,e){var t=n.components,i=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?r.createElement(d,a(a({ref:e},c),{},{components:t})):r.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:i,a[1]=l;for(var u=2;u labels = new HashMap();\n 
labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.1/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168045185-f25c61b6-8727-434e-8150-e13cc4a04ade.png",alt:"python"})," "),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/08bd5166.704c48e5.js b/zh-CN/assets/js/08bd5166.8f04ad0b.js similarity index 51% rename from zh-CN/assets/js/08bd5166.704c48e5.js rename to zh-CN/assets/js/08bd5166.8f04ad0b.js index 1b4c7f64801..788ae559605 100644 --- a/zh-CN/assets/js/08bd5166.704c48e5.js +++ b/zh-CN/assets/js/08bd5166.8f04ad0b.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var 
n=a(67294),i=a(72389),c=a(44996),r=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 
OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) 
to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which make the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transcation Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,i.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==r?void 0:r[e];return n.createElement("div",null,n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," ",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton 
blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title 
text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,c.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),i=a(72389),c=a(44996),r=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis 
\u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core 
Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which make the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transcation Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,i.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==r?void 0:r[e];return n.createElement("div",null,n.createElement("script",{src:"//cdn.matomo.cloud/apachelinkis.matomo.cloud/matomo.js"}),n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," 
",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item 
after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,c.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,c.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/1b8561f3.3a00e42c.js b/zh-CN/assets/js/1b8561f3.3a00e42c.js deleted file mode 100644 index 5d2f8f1b145..00000000000 --- a/zh-CN/assets/js/1b8561f3.3a00e42c.js +++ /dev/null @@ -1 +0,0 @@ -"use 
strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[71075],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var r=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,r)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var p=r.createContext({}),u=function(n){var e=r.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return r.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return r.createElement(r.Fragment,{},e)}},h=r.forwardRef((function(n,e){var t=n.components,i=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?r.createElement(d,a(a({ref:e},c),{},{components:t})):r.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:i,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.0/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 
python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(67730).Z})),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},67730:function(n,e,t){e.Z=t.p+"assets/images/python-config-63895470a36d8a8fa58eaaa44186ce23.png"},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/1b8561f3.5a69385b.js b/zh-CN/assets/js/1b8561f3.5a69385b.js new file mode 100644 index 00000000000..5fae54b6c3b --- /dev/null +++ b/zh-CN/assets/js/1b8561f3.5a69385b.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[71075],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var i=t(67294);function r(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(n);e&&(i=i.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,i)}return t}function a(n){for(var e=1;e=0||(r[t]=n[t]);return r}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(r[t]=n[t])}return r}var p=i.createContext({}),u=function(n){var e=i.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return i.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return i.createElement(i.Fragment,{},e)}},h=i.forwardRef((function(n,e){var t=n.components,r=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=r,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?i.createElement(d,a(a({ref:e},c),{},{components:t})):i.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,r=e&&e.mdxType;if("string"==typeof n||r){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:r,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 
-codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.0/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(67730).Z})),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},67730:function(n,e,t){e.Z=t.p+"assets/images/python-config-63895470a36d8a8fa58eaaa44186ce23.png"},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/1f71503e.f15d3941.js b/zh-CN/assets/js/1f71503e.24259d6e.js similarity index 71% rename from zh-CN/assets/js/1f71503e.f15d3941.js rename to zh-CN/assets/js/1f71503e.24259d6e.js index 98e870c3e13..411bc875ce7 100644 --- a/zh-CN/assets/js/1f71503e.f15d3941.js +++ b/zh-CN/assets/js/1f71503e.24259d6e.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[98091],{3905:function(e,n,t){t.d(n,{Zo:function(){return k},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),u=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},k=function(e){var n=u(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,k=p(e,["components","mdxType","originalType","parentName"]),c=u(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return 
t?a.createElement(g,i(i({ref:n},k),{},{components:t})):a.createElement(g,i({ref:n},k))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var u=2;u 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002 "),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044322-ce057ec0-8891-4691-9454-8fba45b2c631.png",alt:"yarn"})," "),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. \u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.1/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'#You can also add the queue value in the StartUpMap of the submission parameter: \nstartupMap.put("wds.linkis.rm.yarnqueue", "dws")\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.1/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 
Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044389-55aea9de-6dfa-4b57-81a6-220e242f9eec.png",alt:"spark"})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[98091],{3905:function(e,n,t){t.d(n,{Zo:function(){return o},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return 
Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),k=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},o=function(e){var n=k(e.components);return a.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,o=p(e,["components","mdxType","originalType","parentName"]),c=k(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||u[d]||l;return t?a.createElement(g,i(i({ref:n},o),{},{components:t})):a.createElement(g,i({ref:n},o))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var k=2;k 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002 "),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044322-ce057ec0-8891-4691-9454-8fba45b2c631.png",alt:"yarn"})," "),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.1/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType\u5bf9\u5e94\u5173\u7cfb py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# \u53ef\u4ee5\u5728\u63d0\u4ea4\u53c2\u6570\u901a\u8fc7-confMap wds.linkis.yarnqueue=dws \u6765\u6307\u5b9ayarn \u961f\u5217\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.1/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 
spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044389-55aea9de-6dfa-4b57-81a6-220e242f9eec.png",alt:"spark"})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/2993eb6c.40c305aa.js b/zh-CN/assets/js/2993eb6c.40c305aa.js new file mode 100644 index 00000000000..e44a4709e47 --- /dev/null +++ b/zh-CN/assets/js/2993eb6c.40c305aa.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[73289],{3905:function(e,n,t){t.d(n,{Zo:function(){return s},kt:function(){return y}});var r=t(67294);function i(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function o(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function a(e){for(var n=1;n=0||(i[t]=e[t]);return i}(e,n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(i[t]=e[t])}return i}var p=r.createContext({}),u=function(e){var n=r.useContext(p),t=n;return e&&(t="function"==typeof e?e(n):a(a({},n),e)),t},s=function(e){var n=u(e.components);return r.createElement(p.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},h=r.forwardRef((function(e,n){var t=e.components,i=e.mdxType,o=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||c[y]||o;return t?r.createElement(d,a(a({ref:n},s),{},{components:t})):r.createElement(d,a({ref:n},s))}));function y(e,n){var t=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in n)hasOwnProperty.call(n,p)&&(l[p]=n[p]);l.originalType=e,l.mdxType="string"==typeof e?e:i,a[1]=l;for(var u=2;u labels = 
new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/latest/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168045185-f25c61b6-8727-434e-8150-e13cc4a04ade.png",alt:"python"})," "),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},32209:function(e,n,t){n.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/2993eb6c.fb393ba4.js b/zh-CN/assets/js/2993eb6c.fb393ba4.js deleted file mode 100644 index 49300f426fc..00000000000 --- a/zh-CN/assets/js/2993eb6c.fb393ba4.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[73289],{3905:function(e,n,t){t.d(n,{Zo:function(){return s},kt:function(){return y}});var r=t(67294);function i(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function o(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);n&&(r=r.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,r)}return t}function a(e){for(var 
n=1;n=0||(i[t]=e[t]);return i}(e,n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(i[t]=e[t])}return i}var p=r.createContext({}),u=function(e){var n=r.useContext(p),t=n;return e&&(t="function"==typeof e?e(n):a(a({},n),e)),t},s=function(e){var n=u(e.components);return r.createElement(p.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return r.createElement(r.Fragment,{},n)}},h=r.forwardRef((function(e,n){var t=e.components,i=e.mdxType,o=e.originalType,p=e.parentName,s=l(e,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||c[y]||o;return t?r.createElement(d,a(a({ref:n},s),{},{components:t})):r.createElement(d,a({ref:n},s))}));function y(e,n){var t=arguments,i=n&&n.mdxType;if("string"==typeof e||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in n)hasOwnProperty.call(n,p)&&(l[p]=n[p]);l.originalType=e,l.mdxType="string"==typeof e?e:i,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/latest/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 
python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168045185-f25c61b6-8727-434e-8150-e13cc4a04ade.png",alt:"python"})," "),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},32209:function(e,n,t){n.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/2e842b4c.06acfbeb.js b/zh-CN/assets/js/2e842b4c.f4098996.js similarity index 70% rename from zh-CN/assets/js/2e842b4c.06acfbeb.js rename to zh-CN/assets/js/2e842b4c.f4098996.js index a5b54165ef9..8e005b7cd47 100644 --- a/zh-CN/assets/js/2e842b4c.06acfbeb.js +++ b/zh-CN/assets/js/2e842b4c.f4098996.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[63280],{3905:function(e,n,t){t.d(n,{Zo:function(){return k},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),u=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},k=function(e){var n=u(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,k=p(e,["components","mdxType","originalType","parentName"]),c=u(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return t?a.createElement(g,i(i({ref:n},k),{},{components:t})):a.createElement(g,i({ref:n},k))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var u=2;u","\u6807\u7b7e\u8fdb\u884c\u6539\u62102.1.0\uff0c\u7136\u540e\u5355\u72ec\u7f16\u8bd1\u6b64\u6a21\u5757\u5373\u53ef\u3002"),(0,l.kt)("h3",{id:"22-spark-engineconn\u90e8\u7f72\u548c\u52a0\u8f7d"},"2.2 spark 
engineConn\u90e8\u7f72\u548c\u52a0\u8f7d"),(0,l.kt)("p",null,"\u5982\u679c\u60a8\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u4e86\u60a8\u7684spark\u5f15\u64ce\u7684\u63d2\u4ef6\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u6210\uff0c\u90a3\u4e48\u60a8\u9700\u8981\u5c06\u65b0\u7684\u63d2\u4ef6\u653e\u7f6e\u5230\u6307\u5b9a\u7684\u4f4d\u7f6e\u4e2d\u624d\u80fd\u52a0\u8f7d\uff0c\u5177\u4f53\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u8fd9\u7bc7\u6587\u7ae0"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5")," "),(0,l.kt)("h3",{id:"23-spark\u5f15\u64ce\u7684\u6807\u7b7e"},"2.3 spark\u5f15\u64ce\u7684\u6807\u7b7e"),(0,l.kt)("p",null,"Linkis1.0\u662f\u901a\u8fc7\u6807\u7b7e\u6765\u8fdb\u884c\u7684\uff0c\u6240\u4ee5\u9700\u8981\u5728\u6211\u4eec\u6570\u636e\u5e93\u4e2d\u63d2\u5165\u6570\u636e\uff0c\u63d2\u5165\u7684\u65b9\u5f0f\u5982\u4e0b\u6587\u6240\u793a\u3002"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5 > 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u662f\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(90388).Z})),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.2/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'You can also add the queue value in the StartUpMap of the submission parameter: `startupMap.put("wds.linkis.rm.yarnqueue", "dws")`\n\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.2/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 
pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(72746).Z})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},90388:function(e,n,t){n.Z=t.p+"assets/images/queue-set-e89c51e5b7d25d78a78580b122e4e64c.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},72746:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-2b013d6df48bcafd6b6b672f44039eab.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[63280],{3905:function(e,n,t){t.d(n,{Zo:function(){return o},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),k=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},o=function(e){var n=k(e.components);return a.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,o=p(e,["components","mdxType","originalType","parentName"]),c=k(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||u[d]||l;return t?a.createElement(g,i(i({ref:n},o),{},{components:t})):a.createElement(g,i({ref:n},o))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var k=2;k","\u6807\u7b7e\u8fdb\u884c\u6539\u62102.1.0\uff0c\u7136\u540e\u5355\u72ec\u7f16\u8bd1\u6b64\u6a21\u5757\u5373\u53ef\u3002"),(0,l.kt)("h3",{id:"22-spark-engineconn\u90e8\u7f72\u548c\u52a0\u8f7d"},"2.2 spark 
engineConn\u90e8\u7f72\u548c\u52a0\u8f7d"),(0,l.kt)("p",null,"\u5982\u679c\u60a8\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u4e86\u60a8\u7684spark\u5f15\u64ce\u7684\u63d2\u4ef6\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u6210\uff0c\u90a3\u4e48\u60a8\u9700\u8981\u5c06\u65b0\u7684\u63d2\u4ef6\u653e\u7f6e\u5230\u6307\u5b9a\u7684\u4f4d\u7f6e\u4e2d\u624d\u80fd\u52a0\u8f7d\uff0c\u5177\u4f53\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u8fd9\u7bc7\u6587\u7ae0"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5")," "),(0,l.kt)("h3",{id:"23-spark\u5f15\u64ce\u7684\u6807\u7b7e"},"2.3 spark\u5f15\u64ce\u7684\u6807\u7b7e"),(0,l.kt)("p",null,"Linkis1.0\u662f\u901a\u8fc7\u6807\u7b7e\u6765\u8fdb\u884c\u7684\uff0c\u6240\u4ee5\u9700\u8981\u5728\u6211\u4eec\u6570\u636e\u5e93\u4e2d\u63d2\u5165\u6570\u636e\uff0c\u63d2\u5165\u7684\u65b9\u5f0f\u5982\u4e0b\u6587\u6240\u793a\u3002"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5 > 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u662f\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(90388).Z})),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.2/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType\u5bf9\u5e94\u5173\u7cfb py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# \u53ef\u4ee5\u5728\u63d0\u4ea4\u53c2\u6570\u901a\u8fc7-confMap wds.linkis.yarnqueue=dws \u6765\u6307\u5b9ayarn \u961f\u5217\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.2/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 
spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(72746).Z})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},90388:function(e,n,t){n.Z=t.p+"assets/images/queue-set-e89c51e5b7d25d78a78580b122e4e64c.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},72746:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-2b013d6df48bcafd6b6b672f44039eab.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/387ebd51.bae69e9f.js b/zh-CN/assets/js/387ebd51.bae69e9f.js deleted file mode 100644 index f4b14fbd7b1..00000000000 --- a/zh-CN/assets/js/387ebd51.bae69e9f.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[17257],{3905:function(t,e,n){n.d(e,{Zo:function(){return o},kt:function(){return s}});var a=n(67294);function l(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function r(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(t);e&&(a=a.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),n.push.apply(n,a)}return n}function i(t){for(var e=1;e=0||(l[n]=t[n]);return l}(t,e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(t);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(t,n)&&(l[n]=t[n])}return l}var k=a.createContext({}),u=function(t){var e=a.useContext(k),n=e;return t&&(n="function"==typeof t?t(e):i(i({},e),t)),n},o=function(t){var e=u(t.components);return a.createElement(k.Provider,{value:e},t.children)},d={inlineCode:"code",wrapper:function(t){var e=t.children;return a.createElement(a.Fragment,{},e)}},m=a.forwardRef((function(t,e){var n=t.components,l=t.mdxType,r=t.originalType,k=t.parentName,o=p(t,["components","mdxType","originalType","parentName"]),m=u(n),s=l,c=m["".concat(k,".").concat(s)]||m[s]||d[s]||r;return n?a.createElement(c,i(i({ref:e},o),{},{components:n})):a.createElement(c,i({ref:e},o))}));function s(t,e){var n=arguments,l=e&&e.mdxType;if("string"==typeof t||l){var r=n.length,i=new Array(r);i[0]=m;var p={};for(var k in 
e)hasOwnProperty.call(e,k)&&(p[k]=e[k]);p.originalType=t,p.mdxType="string"==typeof t?t:l,i[1]=p;for(var u=2;u \u6307\u4ee4Map\u7c7b\u578b\u53c2\u6570\u4e2d\u7684key > \u7528\u6237\u914d\u7f6e > \u9ed8\u8ba4\u914d\u7f6e\n")))),(0,r.kt)("p",null,"\u793a\u4f8b\uff1a"),(0,r.kt)("p",null,"\u914d\u7f6e\u5f15\u64ce\u542f\u52a8\u53c2\u6570\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.param.conf.spark.executor.instances=3\n wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02\n")),(0,r.kt)("p",null,"\u914d\u7f6elabelMap\u53c2\u6570\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.label.myLabel=label123\n")),(0,r.kt)("h3",{id:"56-\u8f93\u51fa\u7ed3\u679c\u96c6\u5230\u6587\u4ef6"},"5.6 \u8f93\u51fa\u7ed3\u679c\u96c6\u5230\u6587\u4ef6"),(0,r.kt)("p",null,"\u4f7f\u7528",(0,r.kt)("inlineCode",{parentName:"p"},"-outPath"),"\u53c2\u6570\u6307\u5b9a\u4e00\u4e2a\u8f93\u51fa\u76ee\u5f55\uff0clinkis-cli\u4f1a\u5c06\u7ed3\u679c\u96c6\u8f93\u51fa\u5230\u6587\u4ef6\uff0c\u6bcf\u4e2a\u7ed3\u679c\u96c6\u4f1a\u81ea\u52a8\u521b\u5efa\u4e00\u4e2a\u6587\u4ef6\uff0c\u8f93\u51fa\u5f62\u5f0f\u5982\u4e0b\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"}," task-[taskId]-result-[idx].txt\n \n")),(0,r.kt)("p",null,"\u4f8b\u5982\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-html"}," task-906-result-1.txt\n task-906-result-2.txt\n task-906-result-3.txt\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/387ebd51.e1fe5f43.js b/zh-CN/assets/js/387ebd51.e1fe5f43.js new file mode 100644 index 00000000000..1fa559f88f6 --- /dev/null +++ b/zh-CN/assets/js/387ebd51.e1fe5f43.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[17257],{3905:function(t,e,n){n.d(e,{Zo:function(){return o},kt:function(){return s}});var a=n(67294);function l(t,e,n){return e in t?Object.defineProperty(t,e,{value:n,enumerable:!0,configurable:!0,writable:!0}):t[e]=n,t}function r(t,e){var n=Object.keys(t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(t);e&&(a=a.filter((function(e){return Object.getOwnPropertyDescriptor(t,e).enumerable}))),n.push.apply(n,a)}return n}function i(t){for(var e=1;e=0||(l[n]=t[n]);return l}(t,e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(t);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(t,n)&&(l[n]=t[n])}return l}var k=a.createContext({}),u=function(t){var e=a.useContext(k),n=e;return t&&(n="function"==typeof t?t(e):i(i({},e),t)),n},o=function(t){var e=u(t.components);return a.createElement(k.Provider,{value:e},t.children)},d={inlineCode:"code",wrapper:function(t){var e=t.children;return a.createElement(a.Fragment,{},e)}},m=a.forwardRef((function(t,e){var n=t.components,l=t.mdxType,r=t.originalType,k=t.parentName,o=p(t,["components","mdxType","originalType","parentName"]),m=u(n),s=l,c=m["".concat(k,".").concat(s)]||m[s]||d[s]||r;return n?a.createElement(c,i(i({ref:e},o),{},{components:n})):a.createElement(c,i({ref:e},o))}));function s(t,e){var n=arguments,l=e&&e.mdxType;if("string"==typeof t||l){var r=n.length,i=new Array(r);i[0]=m;var p={};for(var k in e)hasOwnProperty.call(e,k)&&(p[k]=e[k]);p.originalType=t,p.mdxType="string"==typeof t?t:l,i[1]=p;for(var u=2;u \u6307\u4ee4Map\u7c7b\u578b\u53c2\u6570\u4e2d\u7684key > \u7528\u6237\u914d\u7f6e > 
\u9ed8\u8ba4\u914d\u7f6e\n")))),(0,r.kt)("p",null,"\u793a\u4f8b\uff1a"),(0,r.kt)("p",null,"\u914d\u7f6e\u5f15\u64ce\u542f\u52a8\u53c2\u6570\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.param.conf.spark.executor.instances=3\n wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02\n")),(0,r.kt)("p",null,"\u914d\u7f6elabelMap\u53c2\u6570\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-properties"}," wds.linkis.client.label.myLabel=label123\n")),(0,r.kt)("h3",{id:"56-\u8f93\u51fa\u7ed3\u679c\u96c6\u5230\u6587\u4ef6"},"5.6 \u8f93\u51fa\u7ed3\u679c\u96c6\u5230\u6587\u4ef6"),(0,r.kt)("p",null,"\u4f7f\u7528",(0,r.kt)("inlineCode",{parentName:"p"},"-outPath"),"\u53c2\u6570\u6307\u5b9a\u4e00\u4e2a\u8f93\u51fa\u76ee\u5f55\uff0clinkis-cli\u4f1a\u5c06\u7ed3\u679c\u96c6\u8f93\u51fa\u5230\u6587\u4ef6\uff0c\u6bcf\u4e2a\u7ed3\u679c\u96c6\u4f1a\u81ea\u52a8\u521b\u5efa\u4e00\u4e2a\u6587\u4ef6\uff0c\u8f93\u51fa\u5f62\u5f0f\u5982\u4e0b\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre"}," task-[taskId]-result-[idx].txt\n \n")),(0,r.kt)("p",null,"\u4f8b\u5982\uff1a"),(0,r.kt)("pre",null,(0,r.kt)("code",{parentName:"pre",className:"language-html"}," task-906-result-1.txt\n task-906-result-2.txt\n task-906-result-3.txt\n")))}m.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/38e75aa0.e6625ba7.js b/zh-CN/assets/js/38e75aa0.cd8de110.js similarity index 72% rename from zh-CN/assets/js/38e75aa0.e6625ba7.js rename to zh-CN/assets/js/38e75aa0.cd8de110.js index ac867bb40a4..e093179d8e3 100644 --- a/zh-CN/assets/js/38e75aa0.e6625ba7.js +++ b/zh-CN/assets/js/38e75aa0.cd8de110.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[50027],{3905:function(e,n,t){t.d(n,{Zo:function(){return k},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),u=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},k=function(e){var n=u(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,k=p(e,["components","mdxType","originalType","parentName"]),c=u(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return t?a.createElement(g,i(i({ref:n},k),{},{components:t})):a.createElement(g,i({ref:n},k))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var u=2;u 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," 
"),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002 "),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044322-ce057ec0-8891-4691-9454-8fba45b2c631.png",alt:"yarn"})," "),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. \u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.3/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'#You can also add the queue value in the StartUpMap of the submission parameter: \nstartupMap.put("wds.linkis.rm.yarnqueue", "dws")\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 
Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044389-55aea9de-6dfa-4b57-81a6-220e242f9eec.png",alt:"spark"})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[50027],{3905:function(e,n,t){t.d(n,{Zo:function(){return u},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return 
Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),k=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},u=function(e){var n=k(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,u=p(e,["components","mdxType","originalType","parentName"]),c=k(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return t?a.createElement(g,i(i({ref:n},u),{},{components:t})):a.createElement(g,i({ref:n},u))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var k=2;k 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002 "),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044322-ce057ec0-8891-4691-9454-8fba45b2c631.png",alt:"yarn"})," "),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.3/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'# codeType\u5bf9\u5e94\u5173\u7cfb py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# \u53ef\u4ee5\u5728\u63d0\u4ea4\u53c2\u6570\u901a\u8fc7-confMap wds.linkis.yarnqueue=dws \u6765\u6307\u5b9ayarn \u961f\u5217\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 
spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044389-55aea9de-6dfa-4b57-81a6-220e242f9eec.png",alt:"spark"})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/4eb638d8.7d7a4430.js b/zh-CN/assets/js/4eb638d8.7d7a4430.js deleted file mode 100644 index d666cc2ea59..00000000000 --- a/zh-CN/assets/js/4eb638d8.7d7a4430.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[55171],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var r=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,r)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var p=r.createContext({}),u=function(n){var e=r.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return r.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return r.createElement(r.Fragment,{},e)}},h=r.forwardRef((function(n,e){var t=n.components,i=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?r.createElement(d,a(a({ref:e},c),{},{components:t})):r.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:i,a[1]=l;for(var u=2;u labels 
= new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(67730).Z})),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},67730:function(n,e,t){e.Z=t.p+"assets/images/python-config-63895470a36d8a8fa58eaaa44186ce23.png"},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/4eb638d8.b0cbde23.js b/zh-CN/assets/js/4eb638d8.b0cbde23.js new file mode 100644 index 00000000000..b5bffb2d89f --- /dev/null +++ b/zh-CN/assets/js/4eb638d8.b0cbde23.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[55171],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var i=t(67294);function r(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(n);e&&(i=i.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,i)}return t}function a(n){for(var e=1;e=0||(r[t]=n[t]);return 
r}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(r[t]=n[t])}return r}var p=i.createContext({}),u=function(n){var e=i.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return i.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return i.createElement(i.Fragment,{},e)}},h=i.forwardRef((function(n,e){var t=n.components,r=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=r,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?i.createElement(d,a(a({ref:e},c),{},{components:t})):i.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,r=e&&e.mdxType;if("string"==typeof n||r){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:r,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(67730).Z})),(0,o.kt)("p",null,"\u56fe4-1 
python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},67730:function(n,e,t){e.Z=t.p+"assets/images/python-config-63895470a36d8a8fa58eaaa44186ce23.png"},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/5b29caaf.ed93c730.js b/zh-CN/assets/js/5b29caaf.ed93c730.js deleted file mode 100644 index eea3ab7dbb9..00000000000 --- a/zh-CN/assets/js/5b29caaf.ed93c730.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[59840],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var r=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,r)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var p=r.createContext({}),u=function(n){var e=r.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return r.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return r.createElement(r.Fragment,{},e)}},h=r.forwardRef((function(n,e){var t=n.components,i=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?r.createElement(d,a(a({ref:e},c),{},{components:t})):r.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:i,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 
Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168045185-f25c61b6-8727-434e-8150-e13cc4a04ade.png",alt:"python"})," "),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/5b29caaf.fa2c5a54.js b/zh-CN/assets/js/5b29caaf.fa2c5a54.js new file mode 100644 index 00000000000..d812d0eb774 --- /dev/null +++ b/zh-CN/assets/js/5b29caaf.fa2c5a54.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[59840],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var o=t(67294);function r(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function i(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);e&&(o=o.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,o)}return t}function a(n){for(var e=1;e=0||(r[t]=n[t]);return r}(n,e);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(n);for(o=0;o=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(r[t]=n[t])}return r}var l=o.createContext({}),u=function(n){var e=o.useContext(l),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return o.createElement(l.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return o.createElement(o.Fragment,{},e)}},h=o.forwardRef((function(n,e){var t=n.components,r=n.mdxType,i=n.originalType,l=n.parentName,c=p(n,["components","mdxType","originalType","parentName"]),h=u(t),y=r,d=h["".concat(l,".").concat(y)]||h[y]||s[y]||i;return t?o.createElement(d,a(a({ref:e},c),{},{components:t})):o.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,r=e&&e.mdxType;if("string"==typeof n||r){var i=t.length,a=new Array(i);a[0]=h;var p={};for(var l in e)hasOwnProperty.call(e,l)&&(p[l]=e[l]);p.originalType=n,p.mdxType="string"==typeof n?n:r,a[1]=p;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n 
labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,i.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,i.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,i.kt)("pre",null,(0,i.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,i.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,i.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,i.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,i.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,i.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,i.kt)("p",null,(0,i.kt)("img",{src:t(32209).Z})),(0,i.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,i.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,i.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,i.kt)("p",null,(0,i.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168045185-f25c61b6-8727-434e-8150-e13cc4a04ade.png",alt:"python"})," "),(0,i.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"},86873:function(n,e,t){e.Z=t.p+"assets/images/python-configure-d636f45c3036219ef47fd240ba1192b7.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/828ffbf8.573b541e.js b/zh-CN/assets/js/828ffbf8.573b541e.js new file mode 100644 index 00000000000..3a759f0a5c3 --- /dev/null +++ b/zh-CN/assets/js/828ffbf8.573b541e.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[13190],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var i=t(67294);function r(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var i=Object.getOwnPropertySymbols(n);e&&(i=i.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,i)}return t}function a(n){for(var 
e=1;e=0||(r[t]=n[t]);return r}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(i=0;i=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(r[t]=n[t])}return r}var p=i.createContext({}),u=function(n){var e=i.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return i.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return i.createElement(i.Fragment,{},e)}},h=i.forwardRef((function(n,e){var t=n.components,r=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=r,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?i.createElement(d,a(a({ref:e},c),{},{components:t})):i.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,r=e&&e.mdxType;if("string"==typeof n||r){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:r,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.2/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(67730).Z})),(0,o.kt)("p",null,"\u56fe4-1 
python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},67730:function(n,e,t){e.Z=t.p+"assets/images/python-config-63895470a36d8a8fa58eaaa44186ce23.png"},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/828ffbf8.f5c51fd0.js b/zh-CN/assets/js/828ffbf8.f5c51fd0.js deleted file mode 100644 index 2867fbdc0c5..00000000000 --- a/zh-CN/assets/js/828ffbf8.f5c51fd0.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[13190],{3905:function(n,e,t){t.d(e,{Zo:function(){return c},kt:function(){return y}});var r=t(67294);function i(n,e,t){return e in n?Object.defineProperty(n,e,{value:t,enumerable:!0,configurable:!0,writable:!0}):n[e]=t,n}function o(n,e){var t=Object.keys(n);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(n);e&&(r=r.filter((function(e){return Object.getOwnPropertyDescriptor(n,e).enumerable}))),t.push.apply(t,r)}return t}function a(n){for(var e=1;e=0||(i[t]=n[t]);return i}(n,e);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(n);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(n,t)&&(i[t]=n[t])}return i}var p=r.createContext({}),u=function(n){var e=r.useContext(p),t=e;return n&&(t="function"==typeof n?n(e):a(a({},e),n)),t},c=function(n){var e=u(n.components);return r.createElement(p.Provider,{value:e},n.children)},s={inlineCode:"code",wrapper:function(n){var e=n.children;return r.createElement(r.Fragment,{},e)}},h=r.forwardRef((function(n,e){var t=n.components,i=n.mdxType,o=n.originalType,p=n.parentName,c=l(n,["components","mdxType","originalType","parentName"]),h=u(t),y=i,d=h["".concat(p,".").concat(y)]||h[y]||s[y]||o;return t?r.createElement(d,a(a({ref:e},c),{},{components:t})):r.createElement(d,a({ref:e},c))}));function y(n,e){var t=arguments,i=e&&e.mdxType;if("string"==typeof n||i){var o=t.length,a=new Array(o);a[0]=h;var l={};for(var p in e)hasOwnProperty.call(e,p)&&(l[p]=e[p]);l.originalType=n,l.mdxType="string"==typeof n?n:i,a[1]=l;for(var u=2;u labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType \n')),(0,o.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,o.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cPython\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,o.kt)("pre",null,(0,o.kt)("code",{parentName:"pre",className:"language-shell"},'sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\\"hello\\")" -submitUser hadoop -proxyUser hadoop\n')),(0,o.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,o.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.2/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,o.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 
Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,o.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u53f3\u952e\u76ee\u5f55\u7136\u540e\u65b0\u5efapython\u811a\u672c\u5e76\u7f16\u5199python\u4ee3\u7801\u5e76\u70b9\u51fb\u6267\u884c\u3002"),(0,o.kt)("p",null,"python\u7684\u6267\u884c\u903b\u8f91\u662f\u901a\u8fc7 Py4j\u7684\u65b9\u5f0f\uff0c\u542f\u52a8\u4e00\u4e2a\u7684python\n\u7684gateway\uff0c\u7136\u540ePython\u5f15\u64ce\u5c06\u4ee3\u7801\u63d0\u4ea4\u5230python\u7684\u6267\u884c\u5668\u8fdb\u884c\u6267\u884c\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(32209).Z})),(0,o.kt)("p",null,"\u56fe3-1 python\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,o.kt)("h2",{id:"4python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.Python\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,o.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982python\u7684\u7248\u672c\u548c\u4ee5\u53capython\u9700\u8981\u52a0\u8f7d\u7684\u4e00\u4e9bmodule\u7b49\u3002"),(0,o.kt)("p",null,(0,o.kt)("img",{src:t(67730).Z})),(0,o.kt)("p",null,"\u56fe4-1 python\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}h.isMDXComponent=!0},67730:function(n,e,t){e.Z=t.p+"assets/images/python-config-63895470a36d8a8fa58eaaa44186ce23.png"},32209:function(n,e,t){e.Z=t.p+"assets/images/python-run-a442d0ab5e119eab2e0aebe935975dac.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/8a4d686c.f23e6200.js b/zh-CN/assets/js/8a4d686c.608ef6fb.js similarity index 71% rename from zh-CN/assets/js/8a4d686c.f23e6200.js rename to zh-CN/assets/js/8a4d686c.608ef6fb.js index 853e6e81fed..0f95196af28 100644 --- a/zh-CN/assets/js/8a4d686c.f23e6200.js +++ b/zh-CN/assets/js/8a4d686c.608ef6fb.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[72146],{3905:function(e,n,t){t.d(n,{Zo:function(){return k},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),u=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},k=function(e){var n=u(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,k=p(e,["components","mdxType","originalType","parentName"]),c=u(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return t?a.createElement(g,i(i({ref:n},k),{},{components:t})):a.createElement(g,i({ref:n},k))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var u=2;u 2.2 
\u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002 "),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044322-ce057ec0-8891-4691-9454-8fba45b2c631.png",alt:"yarn"})," "),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. \u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/latest/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'#You can also add the queue value in the StartUpMap of the submission parameter: \nstartupMap.put("wds.linkis.rm.yarnqueue", "dws")\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/latest/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 
Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044389-55aea9de-6dfa-4b57-81a6-220e242f9eec.png",alt:"spark"})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[72146],{3905:function(e,n,t){t.d(n,{Zo:function(){return o},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return 
Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),k=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},o=function(e){var n=k(e.components);return a.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,o=p(e,["components","mdxType","originalType","parentName"]),c=k(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||u[d]||l;return t?a.createElement(g,i(i({ref:n},o),{},{components:t})):a.createElement(g,i({ref:n},o))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var k=2;k 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002 "),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044322-ce057ec0-8891-4691-9454-8fba45b2c631.png",alt:"yarn"})," "),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/latest/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType\u5bf9\u5e94\u5173\u7cfb py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# \u53ef\u4ee5\u5728\u63d0\u4ea4\u53c2\u6570\u901a\u8fc7-confMap wds.linkis.yarnqueue=dws \u6765\u6307\u5b9ayarn \u961f\u5217\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/latest/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 
spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{parentName:"p",src:"https://user-images.githubusercontent.com/29391030/168044389-55aea9de-6dfa-4b57-81a6-220e242f9eec.png",alt:"spark"})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/9dd8a0d2.d5b54caf.js b/zh-CN/assets/js/9dd8a0d2.d5b54caf.js new file mode 100644 index 00000000000..ba5813d2241 --- /dev/null +++ b/zh-CN/assets/js/9dd8a0d2.d5b54caf.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[87054,48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),r=a(72389),i=a(44996),c=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink 
\u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the 
underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which make the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transcation Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,r.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==c?void 0:c[e];return n.createElement("div",null,n.createElement("script",{src:"//cdn.matomo.cloud/apachelinkis.matomo.cloud/matomo.js"}),n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," ",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton 
blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title 
text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,i.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}},66206:function(e,t,a){a.r(t),a.d(t,{default:function(){return j}});var n=a(67294);var r=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)},i="object"==typeof global&&global&&global.Object===Object&&global,c="object"==typeof self&&self&&self.Object===Object&&self,o=i||c||Function("return this")(),l=function(){return o.Date.now()},s=/\s/;var m=function(e){for(var t=e.length;t--&&s.test(e.charAt(t)););return t},d=/^\s+/;var u=function(e){return e?e.slice(0,m(e)+1).replace(d,""):e},p=o.Symbol,h=Object.prototype,g=h.hasOwnProperty,f=h.toString,b=p?p.toStringTag:void 0;var v=function(e){var t=g.call(e,b),a=e[b];try{e[b]=void 0;var n=!0}catch(i){}var r=f.call(e);return n&&(t?e[b]=a:delete e[b]),r},A=Object.prototype.toString;var y=function(e){return A.call(e)},E=p?p.toStringTag:void 0;var N=function(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":E&&E in Object(e)?v(e):y(e)};var k=function(e){return null!=e&&"object"==typeof e};var 
w=function(e){return"symbol"==typeof e||k(e)&&"[object Symbol]"==N(e)},S=/^[-+]0x[0-9a-f]+$/i,x=/^0b[01]+$/i,T=/^0o[0-7]+$/i,C=parseInt;var L=function(e){if("number"==typeof e)return e;if(w(e))return NaN;if(r(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=r(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=u(e);var a=x.test(e);return a||T.test(e)?C(e.slice(2),a?2:8):S.test(e)?NaN:+e},z=Math.max,I=Math.min;var D=function(e,t,a){var n,i,c,o,s,m,d=0,u=!1,p=!1,h=!0;if("function"!=typeof e)throw new TypeError("Expected a function");function g(t){var a=n,r=i;return n=i=void 0,d=t,o=e.apply(r,a)}function f(e){return d=e,s=setTimeout(v,t),u?g(e):o}function b(e){var a=e-m;return void 0===m||a>=t||a<0||p&&e-d>=c}function v(){var e=l();if(b(e))return A(e);s=setTimeout(v,function(e){var a=t-(e-m);return p?I(a,c-(e-d)):a}(e))}function A(e){return s=void 0,h&&n?g(e):(n=i=void 0,o)}function y(){var e=l(),a=b(e);if(n=arguments,i=this,m=e,a){if(void 0===s)return f(m);if(p)return clearTimeout(s),s=setTimeout(v,t),g(m)}return void 0===s&&(s=setTimeout(v,t)),o}return t=L(t)||0,r(a)&&(u=!!a.leading,c=(p="maxWait"in a)?z(L(a.maxWait)||0,t):c,h="trailing"in a?!!a.trailing:h),y.cancel=function(){void 0!==s&&clearTimeout(s),d=0,n=m=i=s=void 0},y.flush=function(){return void 0===s?o:A(l())},y};var F=function(e,t,a){var n=!0,i=!0;if("function"!=typeof e)throw new TypeError("Expected a function");return r(a)&&(n="leading"in a?!!a.leading:n,i="trailing"in a?!!a.trailing:i),D(e,t,{leading:n,maxWait:t,trailing:i})},U=a(89276),B=a(52263),q=a(88458),R=a(72389);function j(){var e=(0,R.Z)(),t=(0,B.Z)().siteConfig,a=e&&location.pathname,r=function(){return"/"===a||"/zh-CN/"===a};return(0,n.useEffect)((function(){if(e){var t=document.getElementsByTagName("nav")[0],a=t&&t.classList;if(!a)return;r()?a.add("index-nav"):a.remove("index-nav"),window.onscroll=F((function(e){try{r()&&(e.target.scrollingElement.scrollTop>0?a.remove("index-nav"):a.add("index-nav"))}catch(t){console.warn(t)}}),150)}}),[e,a]),n.createElement(U.Z,{title:t.title,description:"Description will go into a meta tag in "},n.createElement("main",null,n.createElement(q.default,null)))}}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/9dd8a0d2.e59e7145.js b/zh-CN/assets/js/9dd8a0d2.e59e7145.js deleted file mode 100644 index 225731f8da7..00000000000 --- a/zh-CN/assets/js/9dd8a0d2.e59e7145.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[87054,48360],{88458:function(e,t,a){a.r(t),a.d(t,{default:function(){return l}});var n=a(67294),r=a(72389),i=a(44996),c=JSON.parse('{"zh-CN":{"common":{"getStart":"\u5f00\u59cb","description":"\u63cf\u8ff0","learnMore":"\u4e86\u89e3\u66f4\u591a","coreFeatures":"\u6838\u5fc3\u7279\u6027","connectivity":"\u8fde\u901a","scalability":"\u6269\u5c55","controllability":"\u7ba1\u63a7","orchestration":"\u7f16\u6392","reusability":"\u590d\u7528","ourUsers":"Our Users","readMore":"\u9605\u8bfb\u66f4\u591a","download":"\u4e0b\u8f7d","releaseDate":"\u53d1\u5e03\u65e5\u671f","newFeatures":"\u65b0\u7279\u6027","enhancement":"\u589e\u5f3a\u70b9","bugFixs":"Bug\u4fee\u590d","changeLog":"\u8be6\u7ec6\u53d8\u66f4"},"home":{"banner":{"slogan":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC 
\u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce,\u540c\u65f6\u5b9e\u73b0\u8de8\u5f15\u64ce\u4e0a\u4e0b\u6587\u5171\u4eab\u3001\u7edf\u4e00\u7684\u8ba1\u7b97\u4efb\u52a1\u548c\u5f15\u64ce\u6cbb\u7406\u4e0e\u7f16\u6392\u80fd\u529b\u3002"},"introduce":{"title":"\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u6982\u5ff5","before":"\u6ca1\u6709Linkis\u4e4b\u524d","after":"\u6709Linkis\u4e4b\u540e","beforeText":"\u4e0a\u5c42\u5e94\u7528\u4ee5\u7d27\u8026\u5408\u65b9\u5f0f\u76f4\u8fde\u5e95\u5c42\u5f15\u64ce\uff0c\u4f7f\u5f97\u6570\u636e\u5e73\u53f0\u53d8\u6210\u590d\u6742\u7684\u7f51\u72b6\u7ed3\u6784","afterText":"\u901a\u8fc7\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u5c06\u5e94\u7528\u5c42\u548c\u5f15\u64ce\u5c42\u89e3\u8026\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u65b9\u5f0f\u7b80\u5316\u590d\u6742\u7684\u7f51\u72b6\u8c03\u7528\u5173\u7cfb\uff0c\u964d\u4f4e\u6570\u636e\u5e73\u53f0\u590d\u6742\u5ea6"},"description":{"standardizedInterfaces":"\u6807\u51c6\u63a5\u53e3","computationGovernance":"\u8ba1\u7b97\u6cbb\u7406","paragraph1":"Linkis \u5728\u4e0a\u5c42\u5e94\u7528\u548c\u5e95\u5c42\u5f15\u64ce\u4e4b\u95f4\u6784\u5efa\u4e86\u4e00\u5c42\u8ba1\u7b97\u4e2d\u95f4\u4ef6\u3002\u901a\u8fc7\u4f7f\u7528Linkis \u63d0\u4f9b\u7684REST/WebSocket/JDBC \u7b49\u6807\u51c6\u63a5\u53e3\uff0c\u4e0a\u5c42\u5e94\u7528\u53ef\u4ee5\u65b9\u4fbf\u5730\u8fde\u63a5\u8bbf\u95eeSpark, Presto, Flink \u7b49\u5e95\u5c42\u5f15\u64ce\u3002","paragraph2":"Linkis\u63d0\u4f9b\u4e86\u5f3a\u5927\u7684\u8fde\u901a\u3001\u590d\u7528\u3001\u7f16\u6392\u3001\u6269\u5c55\u548c\u6cbb\u7406\u7ba1\u63a7\u80fd\u529b\uff0c\u4ee5\u6807\u51c6\u5316\u53ef\u590d\u7528\u7684\u65b9\u5f0f\u89e3\u51b3 OLAP\u3001OLTP(\u5b9e\u73b0\u4e2d)\u3001Streaming\u7b49\u4e0d\u540c\u7c7b\u578b\u5f15\u64ce\u7684\u8ba1\u7b97\u6cbb\u7406\u95ee\u9898\u3002"},"core":{"connectivity":"\u7b80\u5316\u8fd0\u7ef4\u73af\u5883\uff1b\u89e3\u8026\u4e0a\u4e0b\u5c42\uff0c\u5e95\u5c42\u53d8\u5316\u900f\u660e\u5316\uff1b\u6253\u901a\u7528\u6237\u8d44\u6e90\u548c\u8fd0\u884c\u65f6\u73af\u5883\uff0c\u544a\u522b\u5e94\u7528\u5b64\u5c9b","scalability":"\u5206\u5e03\u5f0f\u5fae\u670d\u52a1\u67b6\u6784\u4f53\u7cfb\uff0c\u89e3\u51b3\u9ad8\u5e76\u53d1\u3001\u9ad8\u53ef\u7528\u3001\u591a\u79df\u6237\u7b49\u95ee\u9898\uff1b\u57fa\u4e8eEngineConn\u63d2\u4ef6\u53ef\u5feb\u901f\u5bf9\u63a5\u65b0\u5f15\u64ce","controllability":"\u6536\u655b\u5f15\u64ce\u5165\u53e3\uff0c\u7edf\u4e00\u8eab\u4efd\u9a8c\u8bc1\u3001\u9ad8\u5371\u9632\u63a7\u3001\u5ba1\u8ba1\u8bb0\u5f55;\u57fa\u4e8e\u6807\u7b7e\u7684\u591a\u7ea7\u7cbe\u7ec6\u5316\u8d44\u6e90\u63a7\u5236\u548c\u56de\u6536\u80fd\u529b","orchestration":"\u57fa\u4e8eOrchestrator \u670d\u52a1\u7684\u6df7\u7b97\u3001\u53cc\u6d3b\u8ba1\u7b97\u7b56\u7565\u8bbe\u8ba1(\u5b9e\u73b0\u4e2d)","reusability":"\u6781\u5927\u964d\u4f4e\u4e0a\u5c42\u5e94\u7528\u7684\u540e\u53f0\u4ee3\u7801\u91cf\uff1b\u53ef\u57fa\u4e8eLinkis \u5feb\u901f\u9ad8\u6548\u6253\u9020\u6570\u636e\u5e73\u53f0\u5de5\u5177\u5957\u4ef6"}}},"en":{"common":{"getStart":"Get Start","description":"Description","learnMore":"Learn More","coreFeatures":"Core Features","connectivity":"Connectivity","scalability":"Scalability","controllability":"Controllability","orchestration":"Orchestration","reusability":"Reusability","ourUsers":"Our Users","readMore":"Read More","download":"Download","releaseDate":"Release Date","newFeatures":"New Features","enhancement":"Enhancement","bugFixs":"Bug 
Fixs","changeLog":"Change Log"},"home":{"banner":{"slogan":"Linkis builds a computation middleware layer to decouple the upper applications and the underlying data engines, provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), while enables cross engine context sharing, unified job& engine governance and orchestration."},"introduce":{"title":"Computation Middleware","before":"Before","after":"After","beforeText":"Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes big data platform a complex network architecture.","afterText":"Build a common layer of \\"computation middleware\\" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way\\n"},"description":{"standardizedInterfaces":"Standardized Interfaces","computationGovernance":"Computation Governance","paragraph1":"Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.","paragraph2":"Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these \\"computation governance\\" affairs in a standardized reusable way."},"core":{"connectivity":"Simplify the operation environment; decouple the upper and lower layers, which make the upper layer insensitive when bottom layers changed","scalability":"Distributed microservice architecture with great scalability and extensibility; quickly integrate with the new underlying engine","controllability":"Converge engine entrance, unify identity verification, high-risk prevention and control, audit records; label-based multi-level refined resource control and recovery capabilities","orchestration":"Computing strategy design based on active-active, mixed computing, transcation Orchestrator Service","reusability":"Highly reduced the back-end development workload of upper-level applications development; Swiftly and efficiently build a data platform tool suite based on Linkis"}}}}'),o={github:{projectUrl:"https://github.com/apache/incubator-linkis",projectReleaseUrl:"https://github.com/apache/incubator-linkis/releases",projectIssueUrl:"https://github.com/apache/incubator-linkis/issues",projectPrUrl:"https://github.com/apache/incubator-linkis/pulls"}};function l(){var e=(0,r.Z)()&&0===location.pathname.indexOf("/zh-CN/")?"zh-CN":"en",t=null==c?void 0:c[e];return n.createElement("div",null,n.createElement("div",{className:"home-page slogan"},n.createElement("div",{className:"ctn-block"},n.createElement("div",{className:"banner text-center"},n.createElement("h1",{className:"home-title"},n.createElement("span",{className:"apache"},"Apache")," ",n.createElement("span",{className:"linkis"},"Linkis")," ",n.createElement("span",{className:"badge"},"Incubating")),n.createElement("p",{className:"home-desc"},t.home.banner.slogan),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/deployment/quick_deploy",className:"corner-botton blue-fill"},t.common.getStart),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/deployment/quick_deploy",className:"corner-botton 
blue-fill"},t.common.getStart),n.createElement("a",{href:o.github.projectUrl,target:"_blank",className:"corner-botton blue"},n.createElement("img",{className:"button-icon",src:"data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABwAAAAcCAYAAAByDd+UAAAAAXNSR0IArs4c6QAAAERlWElmTU0AKgAAAAgAAYdpAAQAAAABAAAAGgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAHKADAAQAAAABAAAAHAAAAABkvfSiAAAE2klEQVRIDa1WSyykWRQ+qrwf8YzQ3iTKWyrxmoWoWJHMoqIRKWErs7AYk1jIWDS9YCdshQWxQMsIYUE6Wm1qiJAIimjSqPaI8hrEu+Z8d9xKVak2mDnJrfvXueee79zzuteFXkApKSlqNzc3rYuLSz6PCN7y7nHbd4vFYrq/v9fz+GN5eXn+39S5PCeQmppaykAfGUT1nJxcY9BVHr8vLS0NSp7j7BQwIyMjjgX7FApFHoM57nn2P5+Y7u7uDN7e3rqZmZlNR+En2tRqdQELfXp4eAiGsASUM3hQCnLkST7W2Fizq6vr+9nZ2S/4L0kpPzA/gk2wsG9iYiJ5eXnR1dUVMbgYUhZAGOBLEPz39fWlmJgY8vHxodPTU29eq4yLi5ve2dn5Zt0rP3JycuJub29ncTJsampqgpW0uLhI/f39tLu7SxxP8vPzI3aXADs/P6eLiwsBymGgkpISio+Pp/X1daqvrxfyHFMz68seGRkR7nWVgJeXl32sMJj9TwkJCaRSqcjT05PS0tIoPT2dVldXKTo6moKCgkipVAoQNpD29vbIbDZTbm4uRUQggUl4BqeEd1g2+OvXr33M/glrAvAxG/PAgIvc3d3tXAVAANvGDLIgGIY9jmvwxvX1tZDhWOZpNJrSqampQQU4bMVHscI/2Mj+J1hvS44Kn1vDyTAkwSP7+/sCQ5GVlaVmhqgzWArLuNDFLDe8doY7MzMz7RKN9aqqq6vVCg6qVipE/CIjI0mr1Yo4SP5r5/DwcKqoqCB2pRUUp1xZWdEq+FT5UiEAOY2twZf8t8woKwBDp6Sbm5t8Bcfmn9RiLsogNDRUzFLorXNAQAAFBgZakw96gIWkkY1Y6EYx/x/EobK600bfO5GlkgGwk5MTZ4JS5MUzGgIaA7xmQxbE8LtkYBGFjLL4r3RwcECHh4d2gIy1C3iTVI6SWFtbI4PBIFlvmlHw4+PjdHZ2JroSlKDkPDw8TAoG0UutKG7OJOrt7SXu8pL9qhmxGxoaosnJSSsYFICfnJysVxYXF59ub2/XwJ0hISHCBSaTiTBQR2FhYbDsRaBbW1s0MDAgBlxqGz8chGvzV3Efcq80snIVijUqKooGBwdpc3NTNAHUE1smeiZ3JdHQbdER87m5OXFD8E1P3Kjp+PjYVkTUIpfJql6vTxL3YUFBwR5fP+UIMpq0RqMhbAYorIZCNPTCwsInTRrZ2NLSQqxMeIVvHQEmey9ih+JnT/4yPT29LAD58bPMV0/R0dFRJDK0qKhItDYYgJaEi7WyslJ0ITvT+Q/uRhiE6wsgckg5lFpsbKyhs7PzN/Cs9yG7U9fT0zNrNBqD5+fnRT9FE4d7kHVwpzNCnNDCnBFOx43cXFtbqxsdHRUi1ifGxMTEiU6n+3NjY6OShxIlIu9BJBNaFZLIGfFjiRYWFuzcDTDWcVtTU/NzWVnZgtz35BHV2NhYMDw8/ImFg/39/eUzgTo6OigpKUnus5vb29upu7tbAMqYcRjMdXV178vLy+0eUXZ9B1qam5u/VFVVZfPbxYB3DLIQsURa/4gAAkJy4OLmzDY0NDRkO4L9aL+V39raWsqZaeRnhIUfU6zXObW1tVn49BZ2nbGrq6vUquCtH2NjY2rO3g8M95nHKo+/Hge+P3PtfYDMS/T/DaQGbM8QvzFuAAAAAElFTkSuQmCC",alt:"github"}),n.createElement("span",null,"GitHub")))))),n.createElement("div",{className:"home-page introduce"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.home.introduce.title),n.createElement("div",{className:"concept home-block"},n.createElement("div",{className:"concept-item before"},n.createElement("h3",{className:"concept-title"},t.home.introduce.before),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.beforeText),n.createElement("div",{className:"before-image"},"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/before_linkis_zh.png"),alt:"before",className:"concept-image"})))),n.createElement("div",{className:"concept-item after"},n.createElement("h3",{className:"concept-title"},t.home.introduce.after),n.createElement("div",{className:"concept-ctn"},n.createElement("p",{className:"home-paragraph"},t.home.introduce.afterText),"en"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_en.png"),alt:"before",className:"concept-image"}),"zh-CN"===e&&n.createElement("img",{src:(0,i.Z)("/home/after_linkis_zh.png"),alt:"before",className:"concept-image"})))))),n.createElement("div",{className:"home-page"},n.createElement("div",{className:"ctn-block description"},n.createElement("h1",{className:"home-block-title 
text-center"},t.common.description),n.createElement("div",{className:"home-block",style:{position:"relative"}},n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.standardizedInterfaces),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph1)),n.createElement("div",{className:"bold-dot",style:{top:"64px",left:"416px"}}),n.createElement("div",{className:"bold-dot",style:{top:"728px",left:"240px"}}),n.createElement("img",{src:(0,i.Z)("/home/description.png"),alt:"description",className:"description-image"}),n.createElement("svg",{width:"860",height:"860",viewBox:"0 0 100 100"},n.createElement("circle",{cx:"50",cy:"50",r:"49.8",className:"dotted"})),n.createElement("div",{className:"top-desc"},n.createElement("h3",{className:"home-paragraph-title"},t.home.description.computationGovernance),n.createElement("p",{className:"home-paragraph"},t.home.description.paragraph2)),n.createElement("div",{className:"botton-row center"},"en"===e&&n.createElement("a",{href:"/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore),"zh-CN"===e&&n.createElement("a",{href:"/zh-CN/docs/latest/introduction",className:"corner-botton blue-fill"},t.common.learnMore))))),n.createElement("div",{className:"home-page feature"},n.createElement("div",{className:"ctn-block"},n.createElement("h1",{className:"home-block-title text-center"},t.common.coreFeatures),n.createElement("div",{className:"features home-block text-center"},n.createElement("div",{className:"feature-item connectivity"},n.createElement("h3",{className:"item-title"},t.common.connectivity),n.createElement("p",{className:"item-desc"},t.home.core.connectivity)),n.createElement("div",{className:"feature-item scalability"},n.createElement("h3",{className:"item-title"},t.common.scalability),n.createElement("p",{className:"item-desc"},t.home.core.scalability)),n.createElement("div",{className:"feature-item controllability"},n.createElement("h3",{className:"item-title"},t.common.controllability),n.createElement("p",{className:"item-desc"},t.home.core.controllability)),n.createElement("div",{className:"feature-item orchestration"},n.createElement("h3",{className:"item-title"},t.common.orchestration),n.createElement("p",{className:"item-desc"},t.home.core.orchestration)),n.createElement("div",{className:"feature-item reusability"},n.createElement("h3",{className:"item-title"},t.common.reusability),n.createElement("p",{className:"item-desc"},t.home.core.reusability))))))}},66206:function(e,t,a){a.r(t),a.d(t,{default:function(){return P}});var n=a(67294);var r=function(e){var t=typeof e;return null!=e&&("object"==t||"function"==t)},i="object"==typeof global&&global&&global.Object===Object&&global,c="object"==typeof self&&self&&self.Object===Object&&self,o=i||c||Function("return this")(),l=function(){return o.Date.now()},s=/\s/;var m=function(e){for(var t=e.length;t--&&s.test(e.charAt(t)););return t},d=/^\s+/;var u=function(e){return e?e.slice(0,m(e)+1).replace(d,""):e},p=o.Symbol,h=Object.prototype,g=h.hasOwnProperty,f=h.toString,b=p?p.toStringTag:void 0;var v=function(e){var t=g.call(e,b),a=e[b];try{e[b]=void 0;var n=!0}catch(i){}var r=f.call(e);return n&&(t?e[b]=a:delete e[b]),r},A=Object.prototype.toString;var y=function(e){return A.call(e)},E=p?p.toStringTag:void 0;var N=function(e){return null==e?void 0===e?"[object Undefined]":"[object Null]":E&&E in Object(e)?v(e):y(e)};var k=function(e){return null!=e&&"object"==typeof e};var 
w=function(e){return"symbol"==typeof e||k(e)&&"[object Symbol]"==N(e)},S=/^[-+]0x[0-9a-f]+$/i,x=/^0b[01]+$/i,T=/^0o[0-7]+$/i,C=parseInt;var L=function(e){if("number"==typeof e)return e;if(w(e))return NaN;if(r(e)){var t="function"==typeof e.valueOf?e.valueOf():e;e=r(t)?t+"":t}if("string"!=typeof e)return 0===e?e:+e;e=u(e);var a=x.test(e);return a||T.test(e)?C(e.slice(2),a?2:8):S.test(e)?NaN:+e},z=Math.max,I=Math.min;var D=function(e,t,a){var n,i,c,o,s,m,d=0,u=!1,p=!1,h=!0;if("function"!=typeof e)throw new TypeError("Expected a function");function g(t){var a=n,r=i;return n=i=void 0,d=t,o=e.apply(r,a)}function f(e){return d=e,s=setTimeout(v,t),u?g(e):o}function b(e){var a=e-m;return void 0===m||a>=t||a<0||p&&e-d>=c}function v(){var e=l();if(b(e))return A(e);s=setTimeout(v,function(e){var a=t-(e-m);return p?I(a,c-(e-d)):a}(e))}function A(e){return s=void 0,h&&n?g(e):(n=i=void 0,o)}function y(){var e=l(),a=b(e);if(n=arguments,i=this,m=e,a){if(void 0===s)return f(m);if(p)return clearTimeout(s),s=setTimeout(v,t),g(m)}return void 0===s&&(s=setTimeout(v,t)),o}return t=L(t)||0,r(a)&&(u=!!a.leading,c=(p="maxWait"in a)?z(L(a.maxWait)||0,t):c,h="trailing"in a?!!a.trailing:h),y.cancel=function(){void 0!==s&&clearTimeout(s),d=0,n=m=i=s=void 0},y.flush=function(){return void 0===s?o:A(l())},y};var F=function(e,t,a){var n=!0,i=!0;if("function"!=typeof e)throw new TypeError("Expected a function");return r(a)&&(n="leading"in a?!!a.leading:n,i="trailing"in a?!!a.trailing:i),D(e,t,{leading:n,maxWait:t,trailing:i})},U=a(89276),B=a(52263),q=a(88458),R=a(72389);function P(){var e=(0,R.Z)(),t=(0,B.Z)().siteConfig,a=e&&location.pathname,r=function(){return"/"===a||"/zh-CN/"===a};return(0,n.useEffect)((function(){if(e){var t=document.getElementsByTagName("nav")[0],a=t&&t.classList;if(!a)return;r()?a.add("index-nav"):a.remove("index-nav"),window.onscroll=F((function(e){try{r()&&(e.target.scrollingElement.scrollTop>0?a.remove("index-nav"):a.add("index-nav"))}catch(t){console.warn(t)}}),150)}}),[e,a]),n.createElement(U.Z,{title:t.title,description:"Description will go into a meta tag in "},n.createElement("main",null,n.createElement(q.default,null)))}}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/dedbedf9.39deda53.js b/zh-CN/assets/js/dedbedf9.5f027f6b.js similarity index 70% rename from zh-CN/assets/js/dedbedf9.39deda53.js rename to zh-CN/assets/js/dedbedf9.5f027f6b.js index 93bd47282db..65977c209cd 100644 --- a/zh-CN/assets/js/dedbedf9.39deda53.js +++ b/zh-CN/assets/js/dedbedf9.5f027f6b.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[51841],{3905:function(e,n,t){t.d(n,{Zo:function(){return k},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),u=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},k=function(e){var n=u(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var 
n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,k=p(e,["components","mdxType","originalType","parentName"]),c=u(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return t?a.createElement(g,i(i({ref:n},k),{},{components:t})):a.createElement(g,i({ref:n},k))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var u=2;u","\u6807\u7b7e\u8fdb\u884c\u6539\u62102.1.0\uff0c\u7136\u540e\u5355\u72ec\u7f16\u8bd1\u6b64\u6a21\u5757\u5373\u53ef\u3002"),(0,l.kt)("h3",{id:"22-spark-engineconn\u90e8\u7f72\u548c\u52a0\u8f7d"},"2.2 spark engineConn\u90e8\u7f72\u548c\u52a0\u8f7d"),(0,l.kt)("p",null,"\u5982\u679c\u60a8\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u4e86\u60a8\u7684spark\u5f15\u64ce\u7684\u63d2\u4ef6\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u6210\uff0c\u90a3\u4e48\u60a8\u9700\u8981\u5c06\u65b0\u7684\u63d2\u4ef6\u653e\u7f6e\u5230\u6307\u5b9a\u7684\u4f4d\u7f6e\u4e2d\u624d\u80fd\u52a0\u8f7d\uff0c\u5177\u4f53\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u8fd9\u7bc7\u6587\u7ae0"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5")," "),(0,l.kt)("h3",{id:"23-spark\u5f15\u64ce\u7684\u6807\u7b7e"},"2.3 spark\u5f15\u64ce\u7684\u6807\u7b7e"),(0,l.kt)("p",null,"Linkis1.0\u662f\u901a\u8fc7\u6807\u7b7e\u6765\u8fdb\u884c\u7684\uff0c\u6240\u4ee5\u9700\u8981\u5728\u6211\u4eec\u6570\u636e\u5e93\u4e2d\u63d2\u5165\u6570\u636e\uff0c\u63d2\u5165\u7684\u65b9\u5f0f\u5982\u4e0b\u6587\u6240\u793a\u3002"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5 > 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u662f\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(90388).Z})),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.3/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'You can also add the queue value in the StartUpMap of the submission parameter: `startupMap.put("wds.linkis.rm.yarnqueue", "dws")`\n\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 
pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(72746).Z})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},90388:function(e,n,t){n.Z=t.p+"assets/images/queue-set-e89c51e5b7d25d78a78580b122e4e64c.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},72746:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-2b013d6df48bcafd6b6b672f44039eab.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[51841],{3905:function(e,n,t){t.d(n,{Zo:function(){return o},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),k=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},o=function(e){var n=k(e.components);return a.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,o=p(e,["components","mdxType","originalType","parentName"]),c=k(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||u[d]||l;return t?a.createElement(g,i(i({ref:n},o),{},{components:t})):a.createElement(g,i({ref:n},o))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var k=2;k","\u6807\u7b7e\u8fdb\u884c\u6539\u62102.1.0\uff0c\u7136\u540e\u5355\u72ec\u7f16\u8bd1\u6b64\u6a21\u5757\u5373\u53ef\u3002"),(0,l.kt)("h3",{id:"22-spark-engineconn\u90e8\u7f72\u548c\u52a0\u8f7d"},"2.2 spark 
engineConn\u90e8\u7f72\u548c\u52a0\u8f7d"),(0,l.kt)("p",null,"\u5982\u679c\u60a8\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u4e86\u60a8\u7684spark\u5f15\u64ce\u7684\u63d2\u4ef6\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u6210\uff0c\u90a3\u4e48\u60a8\u9700\u8981\u5c06\u65b0\u7684\u63d2\u4ef6\u653e\u7f6e\u5230\u6307\u5b9a\u7684\u4f4d\u7f6e\u4e2d\u624d\u80fd\u52a0\u8f7d\uff0c\u5177\u4f53\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u8fd9\u7bc7\u6587\u7ae0"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5")," "),(0,l.kt)("h3",{id:"23-spark\u5f15\u64ce\u7684\u6807\u7b7e"},"2.3 spark\u5f15\u64ce\u7684\u6807\u7b7e"),(0,l.kt)("p",null,"Linkis1.0\u662f\u901a\u8fc7\u6807\u7b7e\u6765\u8fdb\u884c\u7684\uff0c\u6240\u4ee5\u9700\u8981\u5728\u6211\u4eec\u6570\u636e\u5e93\u4e2d\u63d2\u5165\u6570\u636e\uff0c\u63d2\u5165\u7684\u65b9\u5f0f\u5982\u4e0b\u6587\u6240\u793a\u3002"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5 > 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u662f\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(90388).Z})),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.3/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType\u5bf9\u5e94\u5173\u7cfb py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# \u53ef\u4ee5\u5728\u63d0\u4ea4\u53c2\u6570\u901a\u8fc7-confMap wds.linkis.yarnqueue=dws \u6765\u6307\u5b9ayarn \u961f\u5217\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.0.3/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 
spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(72746).Z})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},90388:function(e,n,t){n.Z=t.p+"assets/images/queue-set-e89c51e5b7d25d78a78580b122e4e64c.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},72746:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-2b013d6df48bcafd6b6b672f44039eab.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/f4730b20.2149dc0e.js b/zh-CN/assets/js/f4730b20.dea2074e.js similarity index 70% rename from zh-CN/assets/js/f4730b20.2149dc0e.js rename to zh-CN/assets/js/f4730b20.dea2074e.js index ece48fef769..5047168f9ff 100644 --- a/zh-CN/assets/js/f4730b20.2149dc0e.js +++ b/zh-CN/assets/js/f4730b20.dea2074e.js @@ -1 +1 @@ -"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[94439],{3905:function(e,n,t){t.d(n,{Zo:function(){return k},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),u=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},k=function(e){var n=u(e.components);return a.createElement(s.Provider,{value:n},e.children)},o={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,k=p(e,["components","mdxType","originalType","parentName"]),c=u(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||o[d]||l;return t?a.createElement(g,i(i({ref:n},k),{},{components:t})):a.createElement(g,i({ref:n},k))}));function d(e,n){var 
t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var u=2;u","\u6807\u7b7e\u8fdb\u884c\u6539\u62102.1.0\uff0c\u7136\u540e\u5355\u72ec\u7f16\u8bd1\u6b64\u6a21\u5757\u5373\u53ef\u3002"),(0,l.kt)("h3",{id:"22-spark-engineconn\u90e8\u7f72\u548c\u52a0\u8f7d"},"2.2 spark engineConn\u90e8\u7f72\u548c\u52a0\u8f7d"),(0,l.kt)("p",null,"\u5982\u679c\u60a8\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u4e86\u60a8\u7684spark\u5f15\u64ce\u7684\u63d2\u4ef6\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u6210\uff0c\u90a3\u4e48\u60a8\u9700\u8981\u5c06\u65b0\u7684\u63d2\u4ef6\u653e\u7f6e\u5230\u6307\u5b9a\u7684\u4f4d\u7f6e\u4e2d\u624d\u80fd\u52a0\u8f7d\uff0c\u5177\u4f53\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u8fd9\u7bc7\u6587\u7ae0"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5")," "),(0,l.kt)("h3",{id:"23-spark\u5f15\u64ce\u7684\u6807\u7b7e"},"2.3 spark\u5f15\u64ce\u7684\u6807\u7b7e"),(0,l.kt)("p",null,"Linkis1.0\u662f\u901a\u8fc7\u6807\u7b7e\u6765\u8fdb\u884c\u7684\uff0c\u6240\u4ee5\u9700\u8981\u5728\u6211\u4eec\u6570\u636e\u5e93\u4e2d\u63d2\u5165\u6570\u636e\uff0c\u63d2\u5165\u7684\u65b9\u5f0f\u5982\u4e0b\u6587\u6240\u793a\u3002"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5 > 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u662f\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(90388).Z})),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.0/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'You can also add the queue value in the StartUpMap of the submission parameter: `startupMap.put("wds.linkis.rm.yarnqueue", "dws")`\n\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.0/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 
pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(72746).Z})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},90388:function(e,n,t){n.Z=t.p+"assets/images/queue-set-e89c51e5b7d25d78a78580b122e4e64c.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},72746:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-2b013d6df48bcafd6b6b672f44039eab.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file +"use strict";(self.webpackChunklinkis_web_apache=self.webpackChunklinkis_web_apache||[]).push([[94439],{3905:function(e,n,t){t.d(n,{Zo:function(){return o},kt:function(){return d}});var a=t(67294);function r(e,n,t){return n in e?Object.defineProperty(e,n,{value:t,enumerable:!0,configurable:!0,writable:!0}):e[n]=t,e}function l(e,n){var t=Object.keys(e);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);n&&(a=a.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),t.push.apply(t,a)}return t}function i(e){for(var n=1;n=0||(r[t]=e[t]);return r}(e,n);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(a=0;a=0||Object.prototype.propertyIsEnumerable.call(e,t)&&(r[t]=e[t])}return r}var s=a.createContext({}),k=function(e){var n=a.useContext(s),t=n;return e&&(t="function"==typeof e?e(n):i(i({},n),e)),t},o=function(e){var n=k(e.components);return a.createElement(s.Provider,{value:n},e.children)},u={inlineCode:"code",wrapper:function(e){var n=e.children;return a.createElement(a.Fragment,{},n)}},c=a.forwardRef((function(e,n){var t=e.components,r=e.mdxType,l=e.originalType,s=e.parentName,o=p(e,["components","mdxType","originalType","parentName"]),c=k(t),d=r,g=c["".concat(s,".").concat(d)]||c[d]||u[d]||l;return t?a.createElement(g,i(i({ref:n},o),{},{components:t})):a.createElement(g,i({ref:n},o))}));function d(e,n){var t=arguments,r=n&&n.mdxType;if("string"==typeof e||r){var l=t.length,i=new Array(l);i[0]=c;var p={};for(var s in n)hasOwnProperty.call(n,s)&&(p[s]=n[s]);p.originalType=e,p.mdxType="string"==typeof e?e:r,i[1]=p;for(var k=2;k","\u6807\u7b7e\u8fdb\u884c\u6539\u62102.1.0\uff0c\u7136\u540e\u5355\u72ec\u7f16\u8bd1\u6b64\u6a21\u5757\u5373\u53ef\u3002"),(0,l.kt)("h3",{id:"22-spark-engineconn\u90e8\u7f72\u548c\u52a0\u8f7d"},"2.2 spark 
engineConn\u90e8\u7f72\u548c\u52a0\u8f7d"),(0,l.kt)("p",null,"\u5982\u679c\u60a8\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u4e86\u60a8\u7684spark\u5f15\u64ce\u7684\u63d2\u4ef6\u5df2\u7ecf\u7f16\u8bd1\u5b8c\u6210\uff0c\u90a3\u4e48\u60a8\u9700\u8981\u5c06\u65b0\u7684\u63d2\u4ef6\u653e\u7f6e\u5230\u6307\u5b9a\u7684\u4f4d\u7f6e\u4e2d\u624d\u80fd\u52a0\u8f7d\uff0c\u5177\u4f53\u53ef\u4ee5\u53c2\u8003\u4e0b\u9762\u8fd9\u7bc7\u6587\u7ae0"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5")," "),(0,l.kt)("h3",{id:"23-spark\u5f15\u64ce\u7684\u6807\u7b7e"},"2.3 spark\u5f15\u64ce\u7684\u6807\u7b7e"),(0,l.kt)("p",null,"Linkis1.0\u662f\u901a\u8fc7\u6807\u7b7e\u6765\u8fdb\u884c\u7684\uff0c\u6240\u4ee5\u9700\u8981\u5728\u6211\u4eec\u6570\u636e\u5e93\u4e2d\u63d2\u5165\u6570\u636e\uff0c\u63d2\u5165\u7684\u65b9\u5f0f\u5982\u4e0b\u6587\u6240\u793a\u3002"),(0,l.kt)("p",null,(0,l.kt)("a",{parentName:"p",href:"../deployment/engine_conn_plugin_installation"},"EngineConnPlugin\u5f15\u64ce\u63d2\u4ef6\u5b89\u88c5 > 2.2 \u7ba1\u7406\u53f0Configuration\u914d\u7f6e\u4fee\u6539\uff08\u53ef\u9009\uff09")," "),(0,l.kt)("h2",{id:"3spark\u5f15\u64ce\u7684\u4f7f\u7528"},"3.spark\u5f15\u64ce\u7684\u4f7f\u7528"),(0,l.kt)("h3",{id:"\u51c6\u5907\u64cd\u4f5c\u961f\u5217\u8bbe\u7f6e"},"\u51c6\u5907\u64cd\u4f5c\uff0c\u961f\u5217\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u56e0\u4e3aspark\u7684\u6267\u884c\u662f\u9700\u8981\u961f\u5217\u7684\u8d44\u6e90\uff0c\u6240\u4ee5\u7528\u6237\u5728\u6267\u884c\u4e4b\u524d\uff0c\u5fc5\u987b\u8981\u8bbe\u7f6e\u81ea\u5df1\u80fd\u591f\u6267\u884c\u7684\u961f\u5217\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(90388).Z})),(0,l.kt)("p",null,"\u56fe3-1 \u961f\u5217\u8bbe\u7f6e\n\u60a8\u4e5f\u53ef\u4ee5\u901a\u8fc7\u5728\u63d0\u4ea4\u53c2\u6570\u7684StartUpMap\u91cc\u9762\u6dfb\u52a0\u961f\u5217\u7684\u503c\uff1a",(0,l.kt)("inlineCode",{parentName:"p"},'startupMap.put("wds.linkis.rm.yarnqueue", "dws")')),(0,l.kt)("h3",{id:"31-\u901a\u8fc7linkis-sdk\u8fdb\u884c\u4f7f\u7528"},"3.1 \u901a\u8fc7Linkis SDK\u8fdb\u884c\u4f7f\u7528"),(0,l.kt)("p",null,"Linkis\u63d0\u4f9b\u4e86Java\u548cScala \u7684SDK\u5411Linkis\u670d\u52a1\u7aef\u63d0\u4ea4\u4efb\u52a1. 
\u5177\u4f53\u53ef\u4ee5\u53c2\u8003 ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.0/user_guide/sdk_manual"},"JAVA SDK Manual"),".\n\u5bf9\u4e8eSpark\u4efb\u52a1\u4f60\u53ea\u9700\u8981\u4fee\u6539Demo\u4e2d\u7684EngineConnType\u548cCodeType\u53c2\u6570\u5373\u53ef:"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-java"},' Map labels = new HashMap();\n labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label\n labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE");// required execute user and creator\n labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala\n')),(0,l.kt)("h3",{id:"32-\u901a\u8fc7linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"},"3.2 \u901a\u8fc7Linkis-cli\u8fdb\u884c\u4efb\u52a1\u63d0\u4ea4"),(0,l.kt)("p",null,"Linkis 1.0\u540e\u63d0\u4f9b\u4e86cli\u7684\u65b9\u5f0f\u63d0\u4ea4\u4efb\u52a1\uff0c\u6211\u4eec\u53ea\u9700\u8981\u6307\u5b9a\u5bf9\u5e94\u7684EngineConn\u548cCodeType\u6807\u7b7e\u7c7b\u578b\u5373\u53ef\uff0cSpark\u7684\u4f7f\u7528\u5982\u4e0b\uff1a"),(0,l.kt)("pre",null,(0,l.kt)("code",{parentName:"pre",className:"language-shell"},'## codeType\u5bf9\u5e94\u5173\u7cfb py--\x3epyspark sql--\x3esparkSQL scala--\x3eSpark scala\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop\n\n# \u53ef\u4ee5\u5728\u63d0\u4ea4\u53c2\u6570\u901a\u8fc7-confMap wds.linkis.yarnqueue=dws \u6765\u6307\u5b9ayarn \u961f\u5217\nsh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop\n')),(0,l.kt)("p",null,"\u5177\u4f53\u4f7f\u7528\u53ef\u4ee5\u53c2\u8003\uff1a ",(0,l.kt)("a",{parentName:"p",href:"/zh-CN/docs/1.1.0/user_guide/linkiscli_manual"},"Linkis CLI Manual"),"."),(0,l.kt)("h3",{id:"33-scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"},"3.3 Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f"),(0,l.kt)("p",null,"Scriptis\u7684\u4f7f\u7528\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u76f4\u63a5\u8fdb\u5165Scriptis\uff0c\u65b0\u5efasql\u3001scala\u6216\u8005pyspark\u811a\u672c\u8fdb\u884c\u6267\u884c\u3002"),(0,l.kt)("p",null,"sql\u7684\u65b9\u5f0f\u662f\u6700\u7b80\u5355\u7684\uff0c\u60a8\u53ef\u4ee5\u65b0\u5efasql\u811a\u672c\u7136\u540e\u7f16\u5199\u8fdb\u884c\u6267\u884c\uff0c\u6267\u884c\u7684\u65f6\u5019\uff0c\u4f1a\u6709\u8fdb\u5ea6\u7684\u663e\u793a\u3002\u5982\u679c\u4e00\u5f00\u59cb\u7528\u6237\u662f\u6ca1\u6709spark\u5f15\u64ce\u7684\u8bdd\uff0csql\u7684\u6267\u884c\u4f1a\u542f\u52a8\u4e00\u4e2aspark\u4f1a\u8bdd(\u8fd9\u91cc\u53ef\u80fd\u4f1a\u82b1\u4e00\u4e9b\u65f6\u95f4)\uff0c\nSparkSession\u521d\u59cb\u5316\u4e4b\u540e\uff0c\u5c31\u53ef\u4ee5\u5f00\u59cb\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(77236).Z})),(0,l.kt)("p",null,"\u56fe3-2 sparksql\u7684\u6267\u884c\u6548\u679c\u622a\u56fe"),(0,l.kt)("p",null,"spark-scala\u7684\u4efb\u52a1\uff0c\u6211\u4eec\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86sqlContext\u7b49\u53d8\u91cf\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528\u8fd9\u4e2asqlContext\u8fdb\u884csql\u7684\u6267\u884c\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(58881).Z})),(0,l.kt)("p",null,"\u56fe3-3 
spark-scala\u7684\u6267\u884c\u6548\u679c\u56fe"),(0,l.kt)("p",null,"\u7c7b\u4f3c\u7684\uff0cpyspark\u7684\u65b9\u5f0f\u4e2d\uff0c\u6211\u4eec\u4e5f\u5df2\u7ecf\u521d\u59cb\u5316\u597d\u4e86SparkSession\uff0c\u7528\u6237\u53ef\u4ee5\u76f4\u63a5\u4f7f\u7528spark.sql\u7684\u65b9\u5f0f\u8fdb\u884c\u6267\u884csql\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(76999).Z}),"\n\u56fe3-4 pyspark\u7684\u6267\u884c\u65b9\u5f0f"),(0,l.kt)("h2",{id:"4spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"},"4.spark\u5f15\u64ce\u7684\u7528\u6237\u8bbe\u7f6e"),(0,l.kt)("p",null,"\u9664\u4e86\u4ee5\u4e0a\u5f15\u64ce\u914d\u7f6e\uff0c\u7528\u6237\u8fd8\u53ef\u4ee5\u8fdb\u884c\u81ea\u5b9a\u4e49\u7684\u8bbe\u7f6e\uff0c\u6bd4\u5982spark\u4f1a\u8bddexecutor\u4e2a\u6570\u548cexecutor\u7684\u5185\u5b58\u3002\u8fd9\u4e9b\u53c2\u6570\u662f\u4e3a\u4e86\u7528\u6237\u80fd\u591f\u66f4\u52a0\u81ea\u7531\u5730\u8bbe\u7f6e\u81ea\u5df1\u7684spark\u7684\u53c2\u6570\uff0c\u53e6\u5916spark\u5176\u4ed6\u53c2\u6570\u4e5f\u53ef\u4ee5\u8fdb\u884c\u4fee\u6539\uff0c\u6bd4\u5982\u7684pyspark\u7684python\u7248\u672c\u7b49\u3002"),(0,l.kt)("p",null,(0,l.kt)("img",{src:t(72746).Z})),(0,l.kt)("p",null,"\u56fe4-1 spark\u7684\u7528\u6237\u81ea\u5b9a\u4e49\u914d\u7f6e\u7ba1\u7406\u53f0"))}c.isMDXComponent=!0},76999:function(e,n,t){n.Z=t.p+"assets/images/pyspakr-run-39cd0bbe6c61d2fc7ad933db99c33d06.png"},90388:function(e,n,t){n.Z=t.p+"assets/images/queue-set-e89c51e5b7d25d78a78580b122e4e64c.png"},58881:function(e,n,t){n.Z=t.p+"assets/images/scala-run-77cd49935a85082d9346d28f3ecf44e3.png"},72746:function(e,n,t){n.Z=t.p+"assets/images/spark-conf-2b013d6df48bcafd6b6b672f44039eab.png"},77236:function(e,n,t){n.Z=t.p+"assets/images/sparksql-run-d748d4fab0548fa92a6e91f42c911466.png"}}]); \ No newline at end of file diff --git a/zh-CN/assets/js/runtime~main.ed9b4782.js b/zh-CN/assets/js/runtime~main.14d5df0c.js similarity index 97% rename from zh-CN/assets/js/runtime~main.ed9b4782.js rename to zh-CN/assets/js/runtime~main.14d5df0c.js index b5509d1aee2..01c1eb6ed42 100644 --- a/zh-CN/assets/js/runtime~main.ed9b4782.js +++ b/zh-CN/assets/js/runtime~main.14d5df0c.js @@ -1 +1 @@ -!function(){"use strict";var e,f,c,a,d,b={},t={};function n(e){var f=t[e];if(void 0!==f)return f.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,n),c.loaded=!0,c.exports}n.m=b,n.c=t,e=[],n.O=function(f,c,a,d){if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(n.O).every((function(e){return n.O[e](c[r])}))?c.splice(r--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,a,d]},n.n=function(e){var f=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(f,{a:f}),f},c=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},n.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof e&&e){if(4&a&&e.__esModule)return e;if(16&a&&"function"==typeof e.then)return e}var d=Object.create(null);n.r(d);var b={};f=f||[null,c({}),c([]),c(c)];for(var t=2&a&&e;"object"==typeof t&&!~f.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((function(f){b[f]=function(){return e[f]}}));return b.default=function(){return e},n.d(d,b),d},n.d=function(e,f){for(var c in f)n.o(f,c)&&!n.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:f[c]})},n.f={},n.e=function(e){return Promise.all(Object.keys(n.f).reduce((function(f,c){return 
n.f[c](e,f),f}),[]))},n.u=function(e){return"assets/js/"+({346:"8982281e",453:"151a86e1",502:"5f62e57a",786:"d209d9e9",963:"702f4255",974:"c10c3ff8",1036:"5003fc7b",1149:"5ba64b07",1248:"48107a87",1467:"f71d1a0a",1494:"ca7ffba6",1509:"92abb950",1521:"ba21dad4",1860:"4cc6c56b",1955:"15cdadf1",1975:"bee110f4",2265:"d829cefe",2818:"ce8e9344",2853:"168f4b70",2909:"6ddae745",2952:"b2efba3d",3456:"ceb8f0ab",3850:"0e10e9f0",3955:"d35fa7d5",3994:"eb170fbf",4015:"f11e6dfe",4149:"b8b1196d",4194:"73246d8b",4396:"b0a6c3f7",4479:"ecb50835",4494:"c0c5b7b9",4546:"4ddcc5f5",5162:"66523218",5232:"0eb102e3",5255:"c3f04348",5323:"71f86139",5334:"b080a527",5335:"e1b89189",5378:"fa3ac0b4",5503:"66f80fa7",5524:"2aa6ddf9",5588:"f0732286",5596:"e453d605",5892:"e48d35eb",6126:"39729459",6178:"7ebc5e69",6378:"f9b6f49f",6747:"46c60f02",6850:"1cf27110",6954:"e7ffef2b",7470:"56f6e57f",7492:"71a040bb",7498:"4d704204",7532:"5e2a796e",7605:"8713ab72",7914:"03b40afa",8293:"70577794",8743:"de1e1058",8965:"fc118b96",9036:"7a0af4d4",9345:"fb8bd50d",9474:"fe9993b3",9552:"fe0350e5",9632:"905f1251",9738:"cb24547f",10296:"6a66bf3d",10311:"5cd280de",10375:"811185b0",10391:"f12c70ac",10502:"9296efef",10594:"cb19b3af",10865:"558b68dd",10974:"611b4d09",11092:"66596a79",11119:"0252f584",11129:"15ad2644",11152:"8c4e9ef1",11535:"2ea06656",11564:"16e64748",11692:"e22883c8",12235:"39cae327",12357:"9d4b5d2f",12397:"60a01a00",12510:"4c7618e8",12581:"326e3b8c",12651:"bfef765c",12875:"a5707bb5",13036:"32497dc0",13099:"c657088c",13190:"828ffbf8",13438:"410c8754",13751:"3720c009",13831:"8e29cd0e",13847:"02b17c37",13933:"4a4836d7",14380:"928461ed",14400:"0bf94c3e",14657:"27ea2ad4",15129:"d9fea774",15299:"02b56946",16243:"5a278fd4",16286:"9347e21a",16406:"50bc71d4",16435:"b01c97ee",16742:"4c05f83b",16872:"6e46386b",16900:"8e4a9518",17061:"1835b842",17257:"387ebd51",17542:"66d63bfc",17612:"dfc8e523",17765:"db6c92b8",17855:"736ee592",18098:"6f7d9ea9",18166:"13c55284",18465:"faf5a39f",18782:"271d4f19",18855:"9968f92c",19247:"af61ff81",19476:"149fb5a9",19514:"94f6e7c5",19750:"34a37c44",20077:"1547ae4c",20261:"181d09a7",20303:"e30bf350",20337:"2196185d",20369:"5f098bb0",20498:"9ee87b7a",20563:"11d4ed91",20689:"1dd2c16e",20720:"2e35beaa",20761:"570cc32b",20873:"4d5bc9bf",21195:"7f513201",21242:"3f30441e",21369:"795cab7c",21390:"16e826d6",21639:"6ee84144",21759:"b20fb5b1",22032:"844dacdd",22101:"e2c90fd5",22286:"f5146b98",22527:"0e2f1b1d",22528:"48d54923",22686:"c18b3986",22849:"5c1a2740",22933:"c7092f1f",22934:"6ce38115",23075:"69191d03",23089:"18faf279",23117:"7bfd9ab5",23632:"75c45afe",24150:"b2ce4305",24348:"c1193d9a",24396:"1f618053",24514:"d4c55177",24550:"c64310e6",24561:"149e7686",24622:"527c5b3a",24994:"ab388b7d",25019:"53a0ac1c",25045:"cc8750f9",25063:"b3235340",25111:"4a2c7c47",25213:"2fcd5bc4",25284:"fee70dc7",25930:"02f66a1d",26224:"03b1f70b",26234:"1cb5e47c",26247:"866a1030",26324:"39f43530",26649:"c1a6a4cc",26706:"26ddef0e",27099:"32f65daf",27174:"957cd9ed",27598:"8837ae6a",27616:"5fdb3d36",27673:"15d49a70",27867:"03013e92",27918:"17896441",27925:"b104ea62",27957:"6ca50c15",27991:"dc1e40d7",28429:"bfb36362",28448:"898cbc84",28497:"ede6f05e",28633:"59b9dec2",28825:"46dcc3ab",28877:"0affaf4e",29085:"4854afc3",29212:"06a9ee64",29231:"0c159898",29376:"2fb52cdd",29440:"46273c88",29450:"80bee161",29463:"0d1a00ab",29470:"e75be527",29514:"1be78505",29522:"5509d565",29828:"374d152b",30305:"3c6d26f7",30477:"359731ac",30503:"26ae3e5f",30617:"022373a3",30672:"9c997609",30829:"f89165fa",31019:"9a0f7358",31047:"bc0eb055",31206:"30833634",31237:"37fb7ae
0",31253:"45f550be",31795:"954c142c",31817:"8a29c50a",31964:"a7eaa5c9",32085:"f11d3660",32336:"764e68f7",32479:"e3dc7569",32617:"b32a71ec",32656:"c134f34e",33405:"2b5b9154",33408:"3f1d99cb",33560:"818823b9",33841:"c00ae604",34304:"f1fbe14b",34414:"aef886b3",34769:"e63926f4",34777:"aed59f8d",34893:"0f1bab08",35003:"43123582",35135:"fb9b1244",35513:"8e846628",35557:"64ed3b8d",35707:"2e1d0e00",35775:"77683134",36180:"baddade3",36311:"a5f6ffc8",36336:"7c63d7b7",36425:"44261163",36511:"b01117d5",37383:"2fbaaf24",37387:"6c9b06a2",37542:"a1466dcd",37797:"79af763e",37976:"00a3bd95",38528:"6daa70f6",38663:"8b63b041",38762:"2fce9687",39158:"877de300",39182:"3e5bd18c",39214:"af574889",39236:"3720b455",39248:"ff7e2f40",39486:"17a37fd7",39638:"0d8abdac",39898:"2a230721",40504:"c634d66a",40512:"5845ef18",40616:"b36e607b",40758:"f0efad3d",41022:"b982bd12",41026:"b3dd23a0",41139:"08074961",41341:"c4acfde6",41468:"04e216b2",42253:"450551e4",42486:"6a7c7d85",43025:"3964d11e",43052:"c1bb201c",43530:"75e407ea",43960:"1cc7dd5e",44172:"88c632e2",44334:"d2c8d872",44394:"1c2fc4cf",44482:"18dd72b8",44696:"0ddecc65",44787:"02df832d",44835:"c678d5ca",44999:"1a93f120",45088:"4233d542",45205:"f5ec55d8",45230:"0ffb9b7e",45407:"cc72fd7a",45487:"0c7dbc5f",45601:"5a7a66a0",45611:"470cc4da",45663:"60f1d01a",45810:"8dc69e89",46023:"6e68be01",46103:"ccc49370",46207:"175db8b4",46220:"0cc84c5f",46516:"4dda80eb",46522:"9ef00cda",46617:"4d8df7e8",46798:"57fd7486",46876:"ccf90adf",47209:"976643f8",47223:"f1295e58",47266:"11e1aecf",47325:"e915bea9",47474:"08915a4a",47611:"b8115b89",47838:"e3315455",47903:"50ef6dc2",48152:"6dbf4be1",48217:"597b902f",48360:"08bd5166",48422:"15cd02d1",48610:"6875c492",48747:"9cc1bad5",48751:"94a0f419",48762:"579b0b82",48826:"a4065928",48932:"248e03f5",49136:"86e0ce03",49222:"ecc3006b",49417:"dc5bddce",49418:"9ae78c43",49476:"65eee9f9",49851:"1b1e6bcf",49893:"f91aa9d3",50027:"38e75aa0",50029:"7cbf873c",50200:"ca0e9c8f",50310:"84e22c91",50531:"21636274",50552:"8f0b7a14",50578:"4f4bfeb0",50603:"89196382",50926:"d4aac00d",50947:"e5e4671e",51030:"9a0e6c1f",51471:"560144f1",51511:"dd4806ef",51702:"066c27d8",51841:"dedbedf9",52058:"058f6c1b",52145:"0ba1b308",52183:"1f0a9aa5",52363:"33e61bb8",52481:"4efc5364",52486:"c7d2638f",52491:"4845ec28",52535:"814f3328",52541:"98157455",52632:"93491d6c",52721:"b88fca75",52989:"6a2e0576",53051:"01ca90a6",53172:"3d2bc35a",53490:"f3d8650d",53608:"9e4087bc",54086:"93ca4beb",54485:"9c417a61",54688:"17f86a2c",54746:"6162fc79",55060:"13fefa32",55171:"4eb638d8",55355:"bfa865b1",55571:"79993517",55938:"a2ce9e02",56230:"33b1402c",56474:"5f634645",56967:"5ff4ba61",57233:"5d3f7811",57253:"33b0f542",57413:"09b37ae7",57463:"7edd234c",57934:"af138731",58146:"410fbc48",58326:"e15bcb33",58474:"11453aa0",58556:"af186705",58576:"07a8ac53",58626:"3bd6e845",58940:"c7d6b528",59365:"9194fa03",59840:"5b29caaf",60035:"7e73bb19",60060:"bd46bd94",60309:"a525c01a",60418:"02ffda68",60438:"5839c5e6",60452:"bf8803da",60470:"9d39ae71",60483:"03f643c5",60975:"3a689c31",61126:"c24c40c9",61194:"aa6c625d",61312:"051f0cab",61576:"390548a3",61925:"a472dc25",61945:"0b37ed67",62076:"43f15380",62823:"4ea94e33",62888:"a332ba00",63013:"b2e90211",63020:"1667253d",63280:"2e842b4c",63339:"cc3d931f",63495:"f70a4257",63718:"9c98909f",63905:"d4c73231",64013:"01a85c17",64233:"895f9d8e",64380:"ea1c9497",64676:"ff917c93",64772:"729835ef",64885:"7f22ac22",64927:"b525f102",64949:"364dd0b4",65014:"2b57c3c9",65057:"5078bbef",65197:"8d998be3",65201:"f417129b",65290:"f0b63de6",65309:"e2382f3b",65437:"946bbf55",65544:"30329f3c"
,65760:"99c5b032",65808:"de7cc143",66017:"ae0ce2bf",66185:"ef38fca0",66465:"eef6ba27",66553:"963f614e",66584:"4724f855",66674:"7cc42675",66705:"adaf5b5c",66873:"28aafd8c",66937:"806f9cca",67235:"ba04e7c1",67317:"16690b75",67780:"925d8af7",68065:"4bf23d2c",68281:"25d494a0",68505:"faf61c95",68651:"f257988d",68798:"a86749d0",69012:"581dea95",69045:"74515d61",69347:"58fe5e6e",69778:"38ddf9f9",70066:"e570e820",70126:"eccc351c",70251:"4a05a5a1",70269:"23782f26",70419:"441dbced",70659:"f5c88462",70971:"00f88e88",71075:"1b8561f3",71341:"333c24af",71695:"124cb083",71786:"3a9af82d",71960:"b0cae314",72024:"d8f0341f",72104:"808e96ea",72142:"87089b5f",72146:"8a4d686c",72157:"3b36ad21",72163:"41c6bdc3",72216:"35c72bf3",72371:"b9a8e5b1",72487:"916b15b1",72747:"0b319c4d",73033:"2c7f1c11",73255:"fe07bdbe",73289:"2993eb6c",73297:"da9d9d40",73333:"92f7f021",73544:"a5a1aefc",73556:"c3b6f34c",73736:"21ceaf0b",73906:"0e434e9f",73997:"9ce623e0",74121:"55960ee5",74231:"e5493fbb",74818:"785d2d5e",74939:"b674895b",75e3:"771623b5",75110:"1fb7a886",75148:"4ea105bf",75216:"c8f808cb",75583:"1adbb4a7",75605:"d56b9b04",75692:"ff591fbc",76060:"65008442",76363:"29641b83",76437:"44d531a0",76620:"f3938e37",77426:"48aac528",77902:"2cf7d993",77952:"e19d0691",78029:"77816f9e",78330:"caec546c",78454:"1edc715e",78485:"9694c975",78761:"afa0b998",78980:"c1063e9f",79691:"ad663fde",79694:"2c02d8be",79938:"d54637cb",79999:"0632d5e4",80053:"935f2afb",80062:"cfc0c37c",80263:"4762897f",80372:"a3c8c7c2",80733:"9b6ad22f",80748:"de6cfe3a",81093:"e041305e",81786:"3800ddde",82060:"f5df6522",82173:"d3152fab",82271:"4aa0eb50",82400:"359d4f76",82444:"9eafe30c",82641:"54a0eb18",82682:"6d8bdd90",82717:"6054d46c",82841:"1f97f226",82859:"c5fc055b",83331:"b320c360",83341:"60ffc9e9",83430:"589616dd",83713:"5d6f0cba",84014:"64fcac21",84340:"27b439bc",84905:"f915e645",85181:"e0c93076",86015:"f36e204c",86037:"e8cfdebd",86143:"b40fd1d6",86264:"02bbd093",86607:"01304813",86743:"9a647680",86932:"47a075bf",87002:"a4e3a305",87054:"9dd8a0d2",87164:"2864ec2f",87253:"f81106e5",88290:"d0c6b3c0",88423:"cdf529de",88426:"48422a68",88463:"115c1cc6",88629:"27dbc74e",88733:"b80bd506",89339:"641be88a",89520:"24327667",89820:"50c09d93",89846:"3010b6c6",89926:"13e8b6e8",90241:"820f361b",90330:"0c38459a",90391:"283536cd",90406:"023e064d",90538:"de271c79",90867:"b77d38b5",91073:"f4c75e9f",91542:"146c40a3",91755:"460e656a",91977:"41fe0b81",92074:"8903e609",92132:"dbeafd1b",92489:"14d47647",93089:"a6aa9e1f",93115:"cb951476",93259:"c1fd58a9",93302:"7148444c",93377:"1426a1d7",93380:"0f458f65",93591:"d6321c51",93735:"6b62a9a4",93803:"70a4d7d3",93929:"2a665aa1",93962:"c0c74b72",93979:"99a30ab3",94138:"5df84640",94141:"fece5140",94172:"439ddd83",94195:"ae1d45b8",94424:"a08e4012",94439:"f4730b20",94478:"ce55c90c",94629:"caa9028b",94640:"fb0c6c89",94791:"cd72fc6f",95012:"27d06669",95086:"31601111",95260:"f2e34371",95369:"39a7dcea",96136:"e1f07afe",96188:"20ab1817",96247:"b12f6675",96369:"397839d3",96739:"eb05c290",96938:"61198ef2",96992:"c456f623",97451:"b62874ee",97507:"cb9d8c24",97597:"8cc02d9f",97616:"306a8c6c",97622:"64db7480",97689:"bdca809f",97892:"be3cf78a",97899:"6fc19996",98091:"1f71503e",98094:"f4c00f3f",98381:"1a35bc33",98647:"ddb6fedf",98651:"03b29b8f",98953:"a79652e8",98959:"01a26e04",99056:"dc1a190b",99181:"530ea569",99348:"52286226",99548:"5414ec7d",99553:"4fd00f9f",99646:"84447780",99670:"c9ae514c",99722:"22aae707",99924:"df203c0f",99944:"390879c4"}[e]||e)+"."+{346:"1f8806d9",453:"cf6598d5",502:"709ecf39",786:"ce2181da",963:"8c38015f",974:"804367e7",1036:"ad7eab74",11
49:"0c434cbd",1248:"0e976e98",1467:"f2b51243",1494:"dd7c57bc",1509:"8c65da09",1521:"8c5d9eab",1860:"31e6d7cf",1955:"d729c497",1975:"e3df88a4",2265:"3731095d",2818:"40d51f94",2853:"05d75d7a",2909:"10ab86d1",2952:"398746f7",3456:"5f329077",3829:"2a47bdd2",3850:"f24e9ea5",3955:"cdbe4f15",3994:"ec629861",4015:"83b8dcf5",4149:"4044399a",4194:"52297fcb",4396:"53e1a778",4479:"60bc4084",4494:"b0463345",4546:"8c4831ad",5162:"32249d3e",5232:"a0372713",5255:"f0513f5e",5323:"937d093d",5334:"12fbde0e",5335:"22c1001e",5378:"db5aa732",5503:"184f212f",5524:"d6a7cd5e",5588:"31129896",5596:"0573fc62",5892:"ed084295",6126:"3f89dfbe",6178:"998126a7",6378:"c0fa579c",6747:"f79a77da",6850:"ac85f1b3",6954:"8bbec96d",7470:"dc18e01e",7492:"94b9edad",7498:"17ca6c8b",7532:"189c1f94",7605:"9d64be80",7914:"d05e29a8",8293:"79422ad7",8743:"ce0a3d8d",8965:"50b75080",9036:"9748d3a8",9345:"b05fd722",9474:"39ae0fdf",9552:"942572bb",9632:"6f8c52c8",9738:"33b93deb",10296:"d6aa94a4",10311:"387390fc",10375:"54414020",10391:"120475ce",10502:"fb8b2ceb",10594:"b3f1ad00",10865:"3803c70a",10972:"bfd6544a",10974:"2e4485e3",11092:"57b26c80",11119:"48cbe33f",11129:"30252ad2",11152:"a588ed25",11535:"67dad406",11564:"4cc893e5",11692:"31b54503",12235:"d611346a",12357:"c12d27b1",12397:"c74bd93c",12510:"b481946f",12581:"9a747c14",12651:"85ca15c0",12875:"d948765a",13036:"0ffd1cee",13099:"07681f7d",13190:"f5c51fd0",13438:"afc9072a",13751:"1f2d2496",13831:"3e79aa5d",13847:"6bd39754",13933:"b927c7e7",14380:"c637bfa1",14400:"72bf05e7",14657:"99a5eafe",15129:"5310dbdd",15299:"cd23ea90",16243:"35a709a7",16286:"951b50a9",16406:"0c13ab47",16435:"76f0dc77",16742:"59b8b60f",16872:"3934d90a",16900:"d6fabf42",17061:"3b9f6f7b",17257:"bae69e9f",17542:"c54e0a18",17612:"98f91be4",17765:"eca8f084",17855:"9c97fd42",18098:"eac03630",18166:"7295783b",18465:"c06b8d73",18782:"cb26705f",18855:"cd86d191",19247:"032387fb",19476:"6f5b0062",19514:"e410faab",19750:"4cab4dfe",20077:"86105e7e",20261:"8b88084f",20303:"304caa61",20337:"e8cfc6fa",20369:"cfb3b521",20498:"c0a3f19a",20563:"959811a9",20689:"32851d76",20720:"d5988fc6",20761:"57ce7d67",20873:"fb57a675",21195:"0eedc7b1",21242:"e58a9470",21369:"fcb9f837",21390:"4c69a3db",21639:"417032f5",21759:"152cb3fa",22032:"d6c07f5e",22101:"0a015cbc",22286:"24cbda1e",22527:"0e0100ce",22528:"1124deae",22686:"f28202a6",22849:"ed888e86",22933:"df268f64",22934:"eaa25d58",23075:"db66a0f0",23089:"e614c53a",23117:"37fff2d9",23632:"9431d38e",24150:"01dce3af",24348:"ac282e2a",24396:"d5d5857a",24514:"f91686f0",24550:"f1d7bb46",24561:"fa8770bc",24608:"9c4d2d11",24622:"9be3798b",24994:"a6c205a4",25019:"b8176936",25045:"e1d428c7",25063:"6882775d",25111:"0ef715d0",25213:"7ce4da41",25284:"248cfbef",25930:"d1c103c7",26224:"95bac4e1",26234:"c63994dc",26247:"51def8aa",26324:"9ebe0597",26649:"5e8c49a1",26706:"ea4aec80",27099:"753759ab",27174:"36399036",27598:"8b29878d",27616:"d65abce8",27673:"4a5ab75a",27867:"d7d79eac",27918:"d0a8d8ed",27925:"2a5c16a8",27957:"428dabaf",27991:"3772c88a",28429:"2a876683",28448:"2370a249",28497:"a2108344",28633:"c0737130",28825:"f054df4b",28877:"aa33bea7",29085:"6c4ec725",29212:"873bcf3d",29231:"6939bb2d",29376:"64367516",29440:"3524e61f",29450:"5c29a9e2",29463:"eb53093a",29470:"978fbb98",29514:"c2b68c2b",29522:"1f0699da",29828:"cbb37427",30305:"b0aa0385",30477:"32827fa2",30503:"11fefa4c",30617:"7f954424",30672:"f5f96664",30829:"3dedc35d",31019:"8f43ab7e",31047:"4f3b0772",31206:"a3eb5a8d",31237:"c896d9a9",31253:"7e8f17df",31795:"ec5d2432",31817:"65893f96",31964:"9d711b7c",32085:"f44f9cc8",32336:"a05fd6c5",32479:"3fbd6
403",32617:"e0f5f739",32656:"255dd6af",33405:"fab08588",33408:"b126f228",33560:"3d37821a",33841:"9ee6942d",34304:"b461d927",34414:"bc5e30fc",34769:"a12d4370",34777:"2f8ccea4",34893:"0f6aee44",35003:"fb1a343e",35135:"96f37e66",35513:"a7219aa9",35557:"f360c688",35707:"029a4644",35775:"f748c0f8",36180:"5ab358cc",36311:"81564718",36336:"acee71c5",36425:"1dd5a995",36511:"9d2cb9d7",37383:"19ac5d35",37387:"f9625158",37542:"d7eafe35",37797:"82078eda",37976:"0d4ff4bb",38528:"7ac4ba4f",38663:"bb0dd05b",38762:"f422b7af",39158:"0743994b",39182:"afef0ab8",39214:"081a7273",39236:"480c4cf6",39248:"a15cd856",39486:"936f5cdc",39638:"67cb34c2",39898:"6db57ab4",40504:"5fd6fde6",40512:"894e2f8e",40616:"a02fef48",40758:"41fd29d2",41022:"4520c4f3",41026:"be2d8a72",41139:"6200c46b",41341:"bfb07e6c",41468:"cf983898",42253:"5f93feb7",42486:"3b0a4954",43025:"ef4dbc52",43052:"d9b2dc4a",43530:"686b3cce",43960:"cfe7ba55",44172:"978f1159",44334:"4f670810",44394:"eb2de966",44482:"d16688e4",44696:"e57586f5",44787:"6501b356",44835:"064c936c",44999:"067608a5",45088:"2485e54e",45205:"79890cd1",45230:"bd5dc018",45407:"fce963dd",45487:"40a2ad76",45601:"9de95d06",45611:"bc15d259",45663:"0003e7e5",45810:"f54625ef",46023:"67de0dba",46103:"c269c6ad",46207:"71e7635c",46220:"20914407",46516:"56892399",46522:"0cba3c3b",46617:"9fbb7012",46798:"9cccc16e",46876:"25efa3a7",46945:"4deecdf7",47209:"59af3317",47223:"402d3ee1",47266:"5f467519",47325:"c7ef1185",47474:"d1bd7565",47611:"9765e6b7",47838:"42538b1f",47903:"d93b2136",48152:"afacac05",48217:"4ffb5bb0",48360:"704c48e5",48422:"b76f994b",48610:"884dfaf4",48747:"ec2f42c0",48751:"e0f9acd0",48762:"4a94347e",48826:"e47dee74",48932:"0a2ccfb1",49136:"904801c9",49222:"8d8781fc",49417:"8fbdef48",49418:"e71dddf1",49476:"df608972",49851:"4cc2de52",49893:"9092b94f",50027:"e6625ba7",50029:"76e354d6",50200:"1c08e454",50310:"d5fb960d",50531:"2818e6b2",50552:"6189e28c",50578:"1bbf9848",50603:"f24f693c",50926:"24058230",50947:"0cc3e088",51030:"14ac24a1",51471:"9bb1d895",51511:"0f477ec7",51702:"82cdafbe",51841:"39deda53",52058:"c0596fa2",52145:"d68e05fc",52183:"fca7ce0f",52363:"c6528778",52481:"1dbbbae1",52486:"c6a43c82",52491:"0ab03d24",52535:"01cc4554",52541:"fdf837d2",52632:"e91aad0d",52721:"ff0e49a9",52989:"ac234699",53051:"5918ab88",53172:"33e5955c",53490:"f2bc0bca",53608:"51fbf6e8",54086:"66949672",54485:"1be6dab5",54688:"bf160cb3",54746:"b56f8f53",55040:"d1cb509f",55060:"5bdab039",55171:"7d7a4430",55355:"df622322",55571:"ea16f475",55938:"42f7ca63",56230:"e9cc4f5e",56474:"5bf53cac",56967:"a6085895",57233:"d62a477c",57253:"14b4d2b2",57413:"4065fa7c",57463:"dd42ae19",57934:"2ce29a10",58146:"824e3d99",58326:"53569147",58474:"fc1ecb4b",58556:"16973e4a",58576:"4f21392f",58626:"f9476fe5",58940:"a33481f2",59365:"d4152f8c",59840:"ed93c730",60035:"46d1ee94",60060:"be42bf8a",60309:"425bd2a8",60418:"c9b1742f",60438:"77d29868",60452:"b44200e8",60470:"dc0f453e",60483:"87ce9487",60975:"0954feaa",61126:"85cbd828",61194:"5b8fd1f4",61312:"9176c59d",61576:"3b9fd01a",61925:"3f6ba8ad",61945:"83aeb77c",62076:"120a1f15",62823:"f5a69935",62888:"0631f832",63013:"3537ce72",63020:"e8cb865f",63280:"06acfbeb",63339:"254dc414",63495:"40071eeb",63718:"f5c58fd3",63905:"1678fb8d",64013:"75f40f00",64233:"3a42ff14",64380:"fc449630",64676:"2b568f76",64772:"8b33418b",64885:"1f95b268",64927:"960d51ee",64949:"52ffd2c9",65014:"18a99462",65057:"b5978b91",65197:"63932d60",65201:"d5ccea93",65290:"b49d6695",65309:"b074cdf0",65437:"07404a3f",65544:"d72a4f75",65760:"7926750e",65808:"a6b12c86",66017:"58f22b3a",66185:"c79eca9c",66465:"c432149
7",66553:"9908e4ce",66584:"2c9d8afa",66674:"47627822",66705:"69b35bde",66873:"e38ed296",66937:"b1b19d82",67235:"d2eea1be",67317:"766d9300",67780:"4f2b4634",68065:"430f87ae",68281:"48a47da8",68505:"c9a07adb",68651:"061f8aa6",68798:"2ab61aa9",69012:"e0bcc580",69045:"9e6a3b67",69347:"1db547de",69778:"2d9f2718",70066:"f9ff8f33",70126:"ffc08d3e",70251:"275b3970",70269:"18e07bba",70419:"9825066b",70659:"13ec1e41",70971:"2840b71a",71075:"3a00e42c",71341:"d034f411",71695:"e46e3e4b",71786:"369a75df",71960:"6e67fd41",72024:"b2517c14",72104:"8bf5c4a6",72142:"9c9bd8d9",72146:"f23e6200",72157:"8697cd1b",72163:"871d93fa",72216:"8dfcd8d2",72371:"5b0bc988",72487:"51bec9b9",72747:"e6aaea6d",73033:"7ec9829d",73255:"e753ee18",73289:"fb393ba4",73297:"578d52fc",73333:"9b8fedf6",73544:"3bf081ca",73556:"0633dfe3",73736:"fa002262",73906:"2a0c2848",73997:"ed3354e6",74121:"3f5b9cdc",74231:"90530f95",74818:"b2143f3e",74939:"e0069315",75e3:"c4f5c672",75110:"4100dc62",75148:"bda08611",75216:"4dd080e3",75583:"9a9ae92e",75605:"229635d9",75692:"d0bce39b",76060:"63c1bac4",76363:"19b435c8",76437:"e524c582",76620:"d04f70ab",77426:"2a8f5b60",77902:"469e35f6",77952:"0f2eced3",78029:"077e8f2c",78330:"3b655924",78454:"243e9743",78485:"826d0f64",78761:"fb113da9",78980:"0f7558d6",79691:"b8c2fd0e",79694:"90bedd6e",79938:"f6177cf4",79999:"b2907541",80053:"be506272",80062:"fe21c522",80263:"a06ab1f9",80372:"1ee029b0",80733:"657cfd97",80748:"5a7edca5",81093:"49abe713",81786:"d1d272da",82060:"43ad112d",82173:"58f72bb4",82271:"42bf6897",82400:"ebfd081c",82444:"4c77c723",82641:"8c233587",82682:"ee82c36a",82717:"25829f78",82841:"3607f1bf",82859:"47584a73",83331:"e292aae4",83341:"df2e2a7a",83430:"dbae9ac3",83713:"eeeb7c66",84014:"c818c7b2",84340:"d766fee5",84905:"d463d264",85181:"0ee456e3",86015:"384a73b1",86037:"19da953b",86143:"d886d369",86264:"6bccbacf",86607:"b5bc064b",86743:"1a5aafa2",86932:"3b6982d0",87002:"a0929106",87054:"e59e7145",87164:"bed87687",87253:"65b66fb4",88290:"deebd8d3",88423:"6f24a30a",88426:"2467aaba",88463:"9dc845ad",88629:"49eb4573",88733:"9e1f5970",89339:"f6c096ac",89520:"4a5b6a95",89820:"70b849b0",89846:"4ca4c970",89926:"7d9186b8",90241:"61409d11",90330:"7df4a720",90391:"6efc9024",90406:"4f5c583b",90538:"2b02e08f",90867:"a25f0ead",91073:"aae3a5b6",91542:"16428814",91755:"3952e41c",91977:"af6778c9",92074:"f51839a9",92132:"85988d9b",92489:"038ab22d",93089:"1ea8fcbb",93115:"99e790a3",93259:"3be856d7",93302:"28b09874",93377:"a6fd242d",93380:"df1c9a07",93591:"b351b1a8",93735:"28fb6c5a",93803:"488776d6",93929:"ca72bbb5",93962:"1bd3c69f",93979:"aa8617b2",94138:"fe89382e",94141:"d29d4e8d",94172:"cd23ba4f",94195:"6bbd3a50",94424:"f52c6355",94439:"2149dc0e",94478:"9c5e9030",94629:"2eaf9893",94640:"085012a9",94791:"98522ec5",95012:"f0e1bf8f",95086:"fae61d1a",95260:"573f146c",95369:"87993913",96136:"63754c59",96188:"52a77d95",96247:"3fa61b10",96369:"31df64c6",96739:"522823a5",96938:"fc55850f",96992:"0a998dcd",97451:"91c6999c",97507:"79735d81",97597:"ed8ae9e8",97616:"7dbcaa4f",97622:"5a16c207",97689:"64d0c3a5",97892:"38c82c13",97899:"9dc5e1cd",98091:"f15d3941",98094:"b271d297",98381:"76ca2bf9",98647:"956d09f7",98651:"23f302c8",98953:"83d60e10",98959:"963763fc",99056:"93e03b8e",99181:"b04f2d24",99348:"7be329f9",99548:"361872e3",99553:"0e71de72",99646:"8962e152",99670:"0d439352",99722:"c78385f1",99924:"5ffd65fa",99944:"7deb33f6"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.e9faf9bb.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return 
this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,f){return Object.prototype.hasOwnProperty.call(e,f)},a={},d="linkis-web-apache:",n.l=function(e,f,c,b){if(a[e])a[e].push(f);else{var t,r;if(void 0!==c)for(var o=document.getElementsByTagName("script"),i=0;i=d)&&Object.keys(n.O).every((function(e){return n.O[e](c[r])}))?c.splice(r--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,a,d]},n.n=function(e){var f=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(f,{a:f}),f},c=Object.getPrototypeOf?function(e){return Object.getPrototypeOf(e)}:function(e){return e.__proto__},n.t=function(e,a){if(1&a&&(e=this(e)),8&a)return e;if("object"==typeof e&&e){if(4&a&&e.__esModule)return e;if(16&a&&"function"==typeof e.then)return e}var d=Object.create(null);n.r(d);var b={};f=f||[null,c({}),c([]),c(c)];for(var t=2&a&&e;"object"==typeof t&&!~f.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((function(f){b[f]=function(){return e[f]}}));return b.default=function(){return e},n.d(d,b),d},n.d=function(e,f){for(var c in f)n.o(f,c)&&!n.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:f[c]})},n.f={},n.e=function(e){return Promise.all(Object.keys(n.f).reduce((function(f,c){return n.f[c](e,f),f}),[]))},n.u=function(e){return"assets/js/"+({346:"8982281e",453:"151a86e1",502:"5f62e57a",786:"d209d9e9",963:"702f4255",974:"c10c3ff8",1036:"5003fc7b",1149:"5ba64b07",1248:"48107a87",1467:"f71d1a0a",1494:"ca7ffba6",1509:"92abb950",1521:"ba21dad4",1860:"4cc6c56b",1955:"15cdadf1",1975:"bee110f4",2265:"d829cefe",2818:"ce8e9344",2853:"168f4b70",2909:"6ddae745",2952:"b2efba3d",3456:"ceb8f0ab",3850:"0e10e9f0",3955:"d35fa7d5",3994:"eb170fbf",4015:"f11e6dfe",4149:"b8b1196d",4194:"73246d8b",4396:"b0a6c3f7",4479:"ecb50835",4494:"c0c5b7b9",4546:"4ddcc5f5",5162:"66523218",5232:"0eb102e3",5255:"c3f04348",5323:"71f86139",5334:"b080a527",5335:"e1b89189",5378:"fa3ac0b4",5503:"66f80fa7",5524:"2aa6ddf9",5588:"f0732286",5596:"e453d605",5892:"e48d35eb",6126:"39729459",6178:"7ebc5e69",6378:"f9b6f49f",6747:"46c60f02",6850:"1cf27110",6954:"e7ffef2b",7470:"56f6e57f",7492:"71a040bb",7498:"4d704204",7532:"5e2a796e",7605:"8713ab72",7914:"03b40afa",8293:"70577794",8743:"de1e1058",8965:"fc118b96",9036:"7a0af4d4",9345:"fb8bd50d",9474:"fe9993b3",9552:"fe0350e5",9632:"905f1251",9738:"cb24547f",10296:"6a66bf3d",10311:"5cd280de",10375:"811185b0",10391:"f12c70ac",10502:"9296efef",10594:"cb19b3af",10865:"558b68dd",10974:"611b4d09",11092:"66596a79",11119:"0252f584",11129:"15ad2644",11152:"8c4e9ef1",11535:"2ea06656",11564:"16e64748",11692:"e22883c8",12235:"39cae327",12357:"9d4b5d2f",12397:"60a01a00",12510:"4c7618e8",12581:"326e3b8c",12651:"bfef765c",12875:"a5707bb5",13036:"32497dc0",13099:"c657088c",13190:"828ffbf8",13438:"410c8754",13751:"3720c009",13831:"8e29cd0e",13847:"02b17c37",13933:"4a4836d7",14380:"928461ed",14400:"0bf94c3e",14657:"27ea2ad4",15129:"d9fea774",15299:"02b56946",16243:"5a278fd4",16286:"9347e21a",16406:"50bc71d4",16435:"b01c97ee",16742:"4c05f83b",16872:"6e46386b",16900:"8e4a9518",17061:"1835b842",17257:"387ebd51",17542:"66d63bfc",17612:"dfc8e523",17765:"db6c92b8",17855:"736ee592",18098:"6f7d9ea9",18166:"13c55284",18465:"faf5a39f",18782:"271d4f19",18855:"9968f92c",19247:"af61ff81",19476:"149fb5a9",19514:"94f6e7c5",19750:"34a37c44",20077:"1547ae4c",20261:"181d09a7",20303:"e30bf350",20337:"2196185d",20369:"5f098bb0",20498:"9ee87b7a",20563:"11d4ed91",20689:"1dd2c16e",20720:"2e35beaa",20761:"570cc32b",20873:"4d5bc9bf",21195:"7f513201",21242:"3f30441e",21369:"795cab7c",21390:"16e826d6",2
1639:"6ee84144",21759:"b20fb5b1",22032:"844dacdd",22101:"e2c90fd5",22286:"f5146b98",22527:"0e2f1b1d",22528:"48d54923",22686:"c18b3986",22849:"5c1a2740",22933:"c7092f1f",22934:"6ce38115",23075:"69191d03",23089:"18faf279",23117:"7bfd9ab5",23632:"75c45afe",24150:"b2ce4305",24348:"c1193d9a",24396:"1f618053",24514:"d4c55177",24550:"c64310e6",24561:"149e7686",24622:"527c5b3a",24994:"ab388b7d",25019:"53a0ac1c",25045:"cc8750f9",25063:"b3235340",25111:"4a2c7c47",25213:"2fcd5bc4",25284:"fee70dc7",25930:"02f66a1d",26224:"03b1f70b",26234:"1cb5e47c",26247:"866a1030",26324:"39f43530",26649:"c1a6a4cc",26706:"26ddef0e",27099:"32f65daf",27174:"957cd9ed",27598:"8837ae6a",27616:"5fdb3d36",27673:"15d49a70",27867:"03013e92",27918:"17896441",27925:"b104ea62",27957:"6ca50c15",27991:"dc1e40d7",28429:"bfb36362",28448:"898cbc84",28497:"ede6f05e",28633:"59b9dec2",28825:"46dcc3ab",28877:"0affaf4e",29085:"4854afc3",29212:"06a9ee64",29231:"0c159898",29376:"2fb52cdd",29440:"46273c88",29450:"80bee161",29463:"0d1a00ab",29470:"e75be527",29514:"1be78505",29522:"5509d565",29828:"374d152b",30305:"3c6d26f7",30477:"359731ac",30503:"26ae3e5f",30617:"022373a3",30672:"9c997609",30829:"f89165fa",31019:"9a0f7358",31047:"bc0eb055",31206:"30833634",31237:"37fb7ae0",31253:"45f550be",31795:"954c142c",31817:"8a29c50a",31964:"a7eaa5c9",32085:"f11d3660",32336:"764e68f7",32479:"e3dc7569",32617:"b32a71ec",32656:"c134f34e",33405:"2b5b9154",33408:"3f1d99cb",33560:"818823b9",33841:"c00ae604",34304:"f1fbe14b",34414:"aef886b3",34769:"e63926f4",34777:"aed59f8d",34893:"0f1bab08",35003:"43123582",35135:"fb9b1244",35513:"8e846628",35557:"64ed3b8d",35707:"2e1d0e00",35775:"77683134",36180:"baddade3",36311:"a5f6ffc8",36336:"7c63d7b7",36425:"44261163",36511:"b01117d5",37383:"2fbaaf24",37387:"6c9b06a2",37542:"a1466dcd",37797:"79af763e",37976:"00a3bd95",38528:"6daa70f6",38663:"8b63b041",38762:"2fce9687",39158:"877de300",39182:"3e5bd18c",39214:"af574889",39236:"3720b455",39248:"ff7e2f40",39486:"17a37fd7",39638:"0d8abdac",39898:"2a230721",40504:"c634d66a",40512:"5845ef18",40616:"b36e607b",40758:"f0efad3d",41022:"b982bd12",41026:"b3dd23a0",41139:"08074961",41341:"c4acfde6",41468:"04e216b2",42253:"450551e4",42486:"6a7c7d85",43025:"3964d11e",43052:"c1bb201c",43530:"75e407ea",43960:"1cc7dd5e",44172:"88c632e2",44334:"d2c8d872",44394:"1c2fc4cf",44482:"18dd72b8",44696:"0ddecc65",44787:"02df832d",44835:"c678d5ca",44999:"1a93f120",45088:"4233d542",45205:"f5ec55d8",45230:"0ffb9b7e",45407:"cc72fd7a",45487:"0c7dbc5f",45601:"5a7a66a0",45611:"470cc4da",45663:"60f1d01a",45810:"8dc69e89",46023:"6e68be01",46103:"ccc49370",46207:"175db8b4",46220:"0cc84c5f",46516:"4dda80eb",46522:"9ef00cda",46617:"4d8df7e8",46798:"57fd7486",46876:"ccf90adf",47209:"976643f8",47223:"f1295e58",47266:"11e1aecf",47325:"e915bea9",47474:"08915a4a",47611:"b8115b89",47838:"e3315455",47903:"50ef6dc2",48152:"6dbf4be1",48217:"597b902f",48360:"08bd5166",48422:"15cd02d1",48610:"6875c492",48747:"9cc1bad5",48751:"94a0f419",48762:"579b0b82",48826:"a4065928",48932:"248e03f5",49136:"86e0ce03",49222:"ecc3006b",49417:"dc5bddce",49418:"9ae78c43",49476:"65eee9f9",49851:"1b1e6bcf",49893:"f91aa9d3",50027:"38e75aa0",50029:"7cbf873c",50200:"ca0e9c8f",50310:"84e22c91",50531:"21636274",50552:"8f0b7a14",50578:"4f4bfeb0",50603:"89196382",50926:"d4aac00d",50947:"e5e4671e",51030:"9a0e6c1f",51471:"560144f1",51511:"dd4806ef",51702:"066c27d8",51841:"dedbedf9",52058:"058f6c1b",52145:"0ba1b308",52183:"1f0a9aa5",52363:"33e61bb8",52481:"4efc5364",52486:"c7d2638f",52491:"4845ec28",52535:"814f3328",52541:"98157455",52632:"93491d6c",527
21:"b88fca75",52989:"6a2e0576",53051:"01ca90a6",53172:"3d2bc35a",53490:"f3d8650d",53608:"9e4087bc",54086:"93ca4beb",54485:"9c417a61",54688:"17f86a2c",54746:"6162fc79",55060:"13fefa32",55171:"4eb638d8",55355:"bfa865b1",55571:"79993517",55938:"a2ce9e02",56230:"33b1402c",56474:"5f634645",56967:"5ff4ba61",57233:"5d3f7811",57253:"33b0f542",57413:"09b37ae7",57463:"7edd234c",57934:"af138731",58146:"410fbc48",58326:"e15bcb33",58474:"11453aa0",58556:"af186705",58576:"07a8ac53",58626:"3bd6e845",58940:"c7d6b528",59365:"9194fa03",59840:"5b29caaf",60035:"7e73bb19",60060:"bd46bd94",60309:"a525c01a",60418:"02ffda68",60438:"5839c5e6",60452:"bf8803da",60470:"9d39ae71",60483:"03f643c5",60975:"3a689c31",61126:"c24c40c9",61194:"aa6c625d",61312:"051f0cab",61576:"390548a3",61925:"a472dc25",61945:"0b37ed67",62076:"43f15380",62823:"4ea94e33",62888:"a332ba00",63013:"b2e90211",63020:"1667253d",63280:"2e842b4c",63339:"cc3d931f",63495:"f70a4257",63718:"9c98909f",63905:"d4c73231",64013:"01a85c17",64233:"895f9d8e",64380:"ea1c9497",64676:"ff917c93",64772:"729835ef",64885:"7f22ac22",64927:"b525f102",64949:"364dd0b4",65014:"2b57c3c9",65057:"5078bbef",65197:"8d998be3",65201:"f417129b",65290:"f0b63de6",65309:"e2382f3b",65437:"946bbf55",65544:"30329f3c",65760:"99c5b032",65808:"de7cc143",66017:"ae0ce2bf",66185:"ef38fca0",66465:"eef6ba27",66553:"963f614e",66584:"4724f855",66674:"7cc42675",66705:"adaf5b5c",66873:"28aafd8c",66937:"806f9cca",67235:"ba04e7c1",67317:"16690b75",67780:"925d8af7",68065:"4bf23d2c",68281:"25d494a0",68505:"faf61c95",68651:"f257988d",68798:"a86749d0",69012:"581dea95",69045:"74515d61",69347:"58fe5e6e",69778:"38ddf9f9",70066:"e570e820",70126:"eccc351c",70251:"4a05a5a1",70269:"23782f26",70419:"441dbced",70659:"f5c88462",70971:"00f88e88",71075:"1b8561f3",71341:"333c24af",71695:"124cb083",71786:"3a9af82d",71960:"b0cae314",72024:"d8f0341f",72104:"808e96ea",72142:"87089b5f",72146:"8a4d686c",72157:"3b36ad21",72163:"41c6bdc3",72216:"35c72bf3",72371:"b9a8e5b1",72487:"916b15b1",72747:"0b319c4d",73033:"2c7f1c11",73255:"fe07bdbe",73289:"2993eb6c",73297:"da9d9d40",73333:"92f7f021",73544:"a5a1aefc",73556:"c3b6f34c",73736:"21ceaf0b",73906:"0e434e9f",73997:"9ce623e0",74121:"55960ee5",74231:"e5493fbb",74818:"785d2d5e",74939:"b674895b",75e3:"771623b5",75110:"1fb7a886",75148:"4ea105bf",75216:"c8f808cb",75583:"1adbb4a7",75605:"d56b9b04",75692:"ff591fbc",76060:"65008442",76363:"29641b83",76437:"44d531a0",76620:"f3938e37",77426:"48aac528",77902:"2cf7d993",77952:"e19d0691",78029:"77816f9e",78330:"caec546c",78454:"1edc715e",78485:"9694c975",78761:"afa0b998",78980:"c1063e9f",79691:"ad663fde",79694:"2c02d8be",79938:"d54637cb",79999:"0632d5e4",80053:"935f2afb",80062:"cfc0c37c",80263:"4762897f",80372:"a3c8c7c2",80733:"9b6ad22f",80748:"de6cfe3a",81093:"e041305e",81786:"3800ddde",82060:"f5df6522",82173:"d3152fab",82271:"4aa0eb50",82400:"359d4f76",82444:"9eafe30c",82641:"54a0eb18",82682:"6d8bdd90",82717:"6054d46c",82841:"1f97f226",82859:"c5fc055b",83331:"b320c360",83341:"60ffc9e9",83430:"589616dd",83713:"5d6f0cba",84014:"64fcac21",84340:"27b439bc",84905:"f915e645",85181:"e0c93076",86015:"f36e204c",86037:"e8cfdebd",86143:"b40fd1d6",86264:"02bbd093",86607:"01304813",86743:"9a647680",86932:"47a075bf",87002:"a4e3a305",87054:"9dd8a0d2",87164:"2864ec2f",87253:"f81106e5",88290:"d0c6b3c0",88423:"cdf529de",88426:"48422a68",88463:"115c1cc6",88629:"27dbc74e",88733:"b80bd506",89339:"641be88a",89520:"24327667",89820:"50c09d93",89846:"3010b6c6",89926:"13e8b6e8",90241:"820f361b",90330:"0c38459a",90391:"283536cd",90406:"023e064d",90538:"de271c79",90867:
"b77d38b5",91073:"f4c75e9f",91542:"146c40a3",91755:"460e656a",91977:"41fe0b81",92074:"8903e609",92132:"dbeafd1b",92489:"14d47647",93089:"a6aa9e1f",93115:"cb951476",93259:"c1fd58a9",93302:"7148444c",93377:"1426a1d7",93380:"0f458f65",93591:"d6321c51",93735:"6b62a9a4",93803:"70a4d7d3",93929:"2a665aa1",93962:"c0c74b72",93979:"99a30ab3",94138:"5df84640",94141:"fece5140",94172:"439ddd83",94195:"ae1d45b8",94424:"a08e4012",94439:"f4730b20",94478:"ce55c90c",94629:"caa9028b",94640:"fb0c6c89",94791:"cd72fc6f",95012:"27d06669",95086:"31601111",95260:"f2e34371",95369:"39a7dcea",96136:"e1f07afe",96188:"20ab1817",96247:"b12f6675",96369:"397839d3",96739:"eb05c290",96938:"61198ef2",96992:"c456f623",97451:"b62874ee",97507:"cb9d8c24",97597:"8cc02d9f",97616:"306a8c6c",97622:"64db7480",97689:"bdca809f",97892:"be3cf78a",97899:"6fc19996",98091:"1f71503e",98094:"f4c00f3f",98381:"1a35bc33",98647:"ddb6fedf",98651:"03b29b8f",98953:"a79652e8",98959:"01a26e04",99056:"dc1a190b",99181:"530ea569",99348:"52286226",99548:"5414ec7d",99553:"4fd00f9f",99646:"84447780",99670:"c9ae514c",99722:"22aae707",99924:"df203c0f",99944:"390879c4"}[e]||e)+"."+{346:"1f8806d9",453:"cf6598d5",502:"709ecf39",786:"ce2181da",963:"8c38015f",974:"804367e7",1036:"ad7eab74",1149:"0c434cbd",1248:"0e976e98",1467:"f2b51243",1494:"dd7c57bc",1509:"8c65da09",1521:"8c5d9eab",1860:"31e6d7cf",1955:"d729c497",1975:"e3df88a4",2265:"3731095d",2818:"40d51f94",2853:"05d75d7a",2909:"10ab86d1",2952:"398746f7",3456:"5f329077",3829:"2a47bdd2",3850:"f24e9ea5",3955:"cdbe4f15",3994:"ec629861",4015:"83b8dcf5",4149:"4044399a",4194:"52297fcb",4396:"53e1a778",4479:"60bc4084",4494:"b0463345",4546:"8c4831ad",5162:"32249d3e",5232:"a0372713",5255:"f0513f5e",5323:"937d093d",5334:"12fbde0e",5335:"22c1001e",5378:"db5aa732",5503:"184f212f",5524:"d6a7cd5e",5588:"31129896",5596:"0573fc62",5892:"ed084295",6126:"3f89dfbe",6178:"998126a7",6378:"c0fa579c",6747:"f79a77da",6850:"ac85f1b3",6954:"8bbec96d",7470:"dc18e01e",7492:"94b9edad",7498:"17ca6c8b",7532:"189c1f94",7605:"9d64be80",7914:"d05e29a8",8293:"79422ad7",8743:"ce0a3d8d",8965:"50b75080",9036:"9748d3a8",9345:"b05fd722",9474:"39ae0fdf",9552:"942572bb",9632:"6f8c52c8",9738:"33b93deb",10296:"d6aa94a4",10311:"387390fc",10375:"54414020",10391:"120475ce",10502:"fb8b2ceb",10594:"b3f1ad00",10865:"3803c70a",10972:"bfd6544a",10974:"2e4485e3",11092:"57b26c80",11119:"7ca2fa86",11129:"30252ad2",11152:"a588ed25",11535:"67dad406",11564:"4cc893e5",11692:"31b54503",12235:"d611346a",12357:"c12d27b1",12397:"c74bd93c",12510:"b481946f",12581:"9a747c14",12651:"85ca15c0",12875:"d948765a",13036:"0ffd1cee",13099:"07681f7d",13190:"573b541e",13438:"afc9072a",13751:"1f2d2496",13831:"3e79aa5d",13847:"6bd39754",13933:"b927c7e7",14380:"c637bfa1",14400:"72bf05e7",14657:"99a5eafe",15129:"5310dbdd",15299:"cd23ea90",16243:"35a709a7",16286:"951b50a9",16406:"0c13ab47",16435:"76f0dc77",16742:"59b8b60f",16872:"3934d90a",16900:"d6fabf42",17061:"3b9f6f7b",17257:"e1fe5f43",17542:"c54e0a18",17612:"98f91be4",17765:"eca8f084",17855:"9c97fd42",18098:"eac03630",18166:"7295783b",18465:"c06b8d73",18782:"cb26705f",18855:"cd86d191",19247:"032387fb",19476:"6f5b0062",19514:"e410faab",19750:"4cab4dfe",20077:"86105e7e",20261:"8b88084f",20303:"304caa61",20337:"e8cfc6fa",20369:"cfb3b521",20498:"c0a3f19a",20563:"959811a9",20689:"32851d76",20720:"d5988fc6",20761:"57ce7d67",20873:"fb57a675",21195:"0eedc7b1",21242:"e58a9470",21369:"fcb9f837",21390:"4c69a3db",21639:"417032f5",21759:"152cb3fa",22032:"d6c07f5e",22101:"0a015cbc",22286:"24cbda1e",22527:"0e0100ce",22528:"1124deae",22686:"f28202a6"
,22849:"ed888e86",22933:"df268f64",22934:"eaa25d58",23075:"db66a0f0",23089:"e614c53a",23117:"37fff2d9",23632:"9431d38e",24150:"01dce3af",24348:"ac282e2a",24396:"d5d5857a",24514:"f91686f0",24550:"f1d7bb46",24561:"fa8770bc",24608:"9c4d2d11",24622:"9be3798b",24994:"a6c205a4",25019:"b8176936",25045:"e1d428c7",25063:"6882775d",25111:"0ef715d0",25213:"7ce4da41",25284:"248cfbef",25930:"d1c103c7",26224:"95bac4e1",26234:"c63994dc",26247:"51def8aa",26324:"9ebe0597",26649:"5e8c49a1",26706:"ea4aec80",27099:"753759ab",27174:"36399036",27598:"8b29878d",27616:"d65abce8",27673:"4a5ab75a",27867:"d7d79eac",27918:"d0a8d8ed",27925:"2a5c16a8",27957:"428dabaf",27991:"3772c88a",28429:"2a876683",28448:"2370a249",28497:"a2108344",28633:"c0737130",28825:"f054df4b",28877:"aa33bea7",29085:"6c4ec725",29212:"873bcf3d",29231:"6939bb2d",29376:"64367516",29440:"3524e61f",29450:"5c29a9e2",29463:"eb53093a",29470:"978fbb98",29514:"c2b68c2b",29522:"1f0699da",29828:"cbb37427",30305:"b0aa0385",30477:"32827fa2",30503:"11fefa4c",30617:"7f954424",30672:"f5f96664",30829:"3dedc35d",31019:"8f43ab7e",31047:"4f3b0772",31206:"a3eb5a8d",31237:"c896d9a9",31253:"7e8f17df",31795:"ec5d2432",31817:"65893f96",31964:"9d711b7c",32085:"f44f9cc8",32336:"a05fd6c5",32479:"3fbd6403",32617:"e0f5f739",32656:"255dd6af",33405:"fab08588",33408:"b126f228",33560:"3d37821a",33841:"9ee6942d",34304:"b461d927",34414:"bc5e30fc",34769:"a12d4370",34777:"2f8ccea4",34893:"0f6aee44",35003:"fb1a343e",35135:"96f37e66",35513:"a7219aa9",35557:"f360c688",35707:"029a4644",35775:"f748c0f8",36180:"5ab358cc",36311:"81564718",36336:"acee71c5",36425:"1dd5a995",36511:"9d2cb9d7",37383:"19ac5d35",37387:"f9625158",37542:"d7eafe35",37797:"82078eda",37976:"0d4ff4bb",38528:"7ac4ba4f",38663:"bb0dd05b",38762:"f422b7af",39158:"0743994b",39182:"afef0ab8",39214:"081a7273",39236:"480c4cf6",39248:"a15cd856",39486:"936f5cdc",39638:"67cb34c2",39898:"6db57ab4",40504:"5fd6fde6",40512:"894e2f8e",40616:"a02fef48",40758:"41fd29d2",41022:"4520c4f3",41026:"be2d8a72",41139:"6200c46b",41341:"bfb07e6c",41468:"cf983898",42253:"5f93feb7",42486:"3b0a4954",43025:"ef4dbc52",43052:"d9b2dc4a",43530:"686b3cce",43960:"cfe7ba55",44172:"978f1159",44334:"4f670810",44394:"eb2de966",44482:"d16688e4",44696:"e57586f5",44787:"6501b356",44835:"064c936c",44999:"067608a5",45088:"2485e54e",45205:"79890cd1",45230:"bd5dc018",45407:"fce963dd",45487:"40a2ad76",45601:"9de95d06",45611:"bc15d259",45663:"0003e7e5",45810:"f54625ef",46023:"67de0dba",46103:"c269c6ad",46207:"71e7635c",46220:"20914407",46516:"56892399",46522:"0cba3c3b",46617:"9fbb7012",46798:"9cccc16e",46876:"25efa3a7",46945:"4deecdf7",47209:"59af3317",47223:"402d3ee1",47266:"5f467519",47325:"c7ef1185",47474:"d1bd7565",47611:"9765e6b7",47838:"42538b1f",47903:"d93b2136",48152:"afacac05",48217:"4ffb5bb0",48360:"8f04ad0b",48422:"b76f994b",48610:"884dfaf4",48747:"ec2f42c0",48751:"e0f9acd0",48762:"4a94347e",48826:"e47dee74",48932:"0a2ccfb1",49136:"904801c9",49222:"8d8781fc",49417:"8fbdef48",49418:"e71dddf1",49476:"df608972",49851:"4cc2de52",49893:"9092b94f",50027:"cd8de110",50029:"76e354d6",50200:"1c08e454",50310:"d5fb960d",50531:"2818e6b2",50552:"6189e28c",50578:"1bbf9848",50603:"f24f693c",50926:"24058230",50947:"0cc3e088",51030:"14ac24a1",51471:"9bb1d895",51511:"0f477ec7",51702:"82cdafbe",51841:"5f027f6b",52058:"c0596fa2",52145:"d68e05fc",52183:"fca7ce0f",52363:"c6528778",52481:"1dbbbae1",52486:"c6a43c82",52491:"0ab03d24",52535:"01cc4554",52541:"fdf837d2",52632:"e91aad0d",52721:"ff0e49a9",52989:"ac234699",53051:"5918ab88",53172:"33e5955c",53490:"f2bc0bca",53608:"51fbf6e8",5
4086:"66949672",54485:"1be6dab5",54688:"bf160cb3",54746:"b56f8f53",55040:"d1cb509f",55060:"5bdab039",55171:"b0cbde23",55355:"df622322",55571:"ea16f475",55938:"42f7ca63",56230:"e9cc4f5e",56474:"5bf53cac",56967:"a6085895",57233:"d62a477c",57253:"14b4d2b2",57413:"4065fa7c",57463:"dd42ae19",57934:"2ce29a10",58146:"824e3d99",58326:"53569147",58474:"fc1ecb4b",58556:"16973e4a",58576:"4f21392f",58626:"f9476fe5",58940:"a33481f2",59365:"d4152f8c",59840:"fa2c5a54",60035:"46d1ee94",60060:"be42bf8a",60309:"425bd2a8",60418:"c9b1742f",60438:"77d29868",60452:"b44200e8",60470:"dc0f453e",60483:"87ce9487",60975:"0954feaa",61126:"85cbd828",61194:"5b8fd1f4",61312:"9176c59d",61576:"3b9fd01a",61925:"3f6ba8ad",61945:"83aeb77c",62076:"120a1f15",62823:"f5a69935",62888:"0631f832",63013:"3537ce72",63020:"e8cb865f",63280:"f4098996",63339:"254dc414",63495:"40071eeb",63718:"f5c58fd3",63905:"1678fb8d",64013:"75f40f00",64233:"3a42ff14",64380:"fc449630",64676:"2b568f76",64772:"8b33418b",64885:"1f95b268",64927:"960d51ee",64949:"52ffd2c9",65014:"18a99462",65057:"b5978b91",65197:"63932d60",65201:"d5ccea93",65290:"b49d6695",65309:"b074cdf0",65437:"07404a3f",65544:"d72a4f75",65760:"7926750e",65808:"a6b12c86",66017:"58f22b3a",66185:"c79eca9c",66465:"c4321497",66553:"9908e4ce",66584:"2c9d8afa",66674:"47627822",66705:"69b35bde",66873:"e38ed296",66937:"b1b19d82",67235:"d2eea1be",67317:"766d9300",67780:"4f2b4634",68065:"430f87ae",68281:"48a47da8",68505:"c9a07adb",68651:"061f8aa6",68798:"2ab61aa9",69012:"e0bcc580",69045:"9e6a3b67",69347:"1db547de",69778:"2d9f2718",70066:"f9ff8f33",70126:"ffc08d3e",70251:"275b3970",70269:"18e07bba",70419:"9825066b",70659:"13ec1e41",70971:"2840b71a",71075:"5a69385b",71341:"d034f411",71695:"e46e3e4b",71786:"369a75df",71960:"6e67fd41",72024:"b2517c14",72104:"8bf5c4a6",72142:"9c9bd8d9",72146:"608ef6fb",72157:"8697cd1b",72163:"871d93fa",72216:"8dfcd8d2",72371:"5b0bc988",72487:"51bec9b9",72747:"e6aaea6d",73033:"7ec9829d",73255:"e753ee18",73289:"40c305aa",73297:"578d52fc",73333:"9b8fedf6",73544:"3bf081ca",73556:"0633dfe3",73736:"fa002262",73906:"2a0c2848",73997:"ed3354e6",74121:"3f5b9cdc",74231:"90530f95",74818:"b2143f3e",74939:"e0069315",75e3:"c4f5c672",75110:"4100dc62",75148:"bda08611",75216:"4dd080e3",75583:"9a9ae92e",75605:"229635d9",75692:"d0bce39b",76060:"63c1bac4",76363:"19b435c8",76437:"e524c582",76620:"d04f70ab",77426:"2a8f5b60",77902:"469e35f6",77952:"0f2eced3",78029:"077e8f2c",78330:"3b655924",78454:"243e9743",78485:"826d0f64",78761:"fb113da9",78980:"0f7558d6",79691:"b8c2fd0e",79694:"90bedd6e",79938:"f6177cf4",79999:"b2907541",80053:"be506272",80062:"fe21c522",80263:"a06ab1f9",80372:"1ee029b0",80733:"657cfd97",80748:"5a7edca5",81093:"49abe713",81786:"d1d272da",82060:"43ad112d",82173:"58f72bb4",82271:"42bf6897",82400:"ebfd081c",82444:"4c77c723",82641:"8c233587",82682:"ee82c36a",82717:"25829f78",82841:"3607f1bf",82859:"47584a73",83331:"e292aae4",83341:"df2e2a7a",83430:"dbae9ac3",83713:"eeeb7c66",84014:"c818c7b2",84340:"d766fee5",84905:"d463d264",85181:"0ee456e3",86015:"384a73b1",86037:"19da953b",86143:"d886d369",86264:"6bccbacf",86607:"b5bc064b",86743:"1a5aafa2",86932:"3b6982d0",87002:"a0929106",87054:"d5b54caf",87164:"bed87687",87253:"65b66fb4",88290:"deebd8d3",88423:"6f24a30a",88426:"2467aaba",88463:"9dc845ad",88629:"49eb4573",88733:"9e1f5970",89339:"f6c096ac",89520:"4a5b6a95",89820:"70b849b0",89846:"4ca4c970",89926:"7d9186b8",90241:"61409d11",90330:"7df4a720",90391:"6efc9024",90406:"4f5c583b",90538:"2b02e08f",90867:"a25f0ead",91073:"aae3a5b6",91542:"16428814",91755:"3952e41c",91977:"af6778c9",9207
4:"f51839a9",92132:"85988d9b",92489:"038ab22d",93089:"1ea8fcbb",93115:"99e790a3",93259:"3be856d7",93302:"28b09874",93377:"a6fd242d",93380:"df1c9a07",93591:"b351b1a8",93735:"28fb6c5a",93803:"488776d6",93929:"ca72bbb5",93962:"1bd3c69f",93979:"aa8617b2",94138:"fe89382e",94141:"d29d4e8d",94172:"cd23ba4f",94195:"6bbd3a50",94424:"f52c6355",94439:"dea2074e",94478:"9c5e9030",94629:"2eaf9893",94640:"085012a9",94791:"98522ec5",95012:"f0e1bf8f",95086:"fae61d1a",95260:"573f146c",95369:"87993913",96136:"63754c59",96188:"52a77d95",96247:"3fa61b10",96369:"31df64c6",96739:"522823a5",96938:"fc55850f",96992:"0a998dcd",97451:"91c6999c",97507:"79735d81",97597:"ed8ae9e8",97616:"7dbcaa4f",97622:"5a16c207",97689:"64d0c3a5",97892:"38c82c13",97899:"9dc5e1cd",98091:"24259d6e",98094:"b271d297",98381:"76ca2bf9",98647:"956d09f7",98651:"23f302c8",98953:"83d60e10",98959:"963763fc",99056:"93e03b8e",99181:"b04f2d24",99348:"7be329f9",99548:"361872e3",99553:"0e71de72",99646:"8962e152",99670:"0d439352",99722:"c78385f1",99924:"5ffd65fa",99944:"7deb33f6"}[e]+".js"},n.miniCssF=function(e){return"assets/css/styles.e9faf9bb.css"},n.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),n.o=function(e,f){return Object.prototype.hasOwnProperty.call(e,f)},a={},d="linkis-web-apache:",n.l=function(e,f,c,b){if(a[e])a[e].push(f);else{var t,r;if(void 0!==c)for(var o=document.getElementsByTagName("script"),i=0;i Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/02/08/how-to-user-blog/index.html b/zh-CN/blog/2022/02/08/how-to-user-blog/index.html index e10feaa6084..ea2b67a532a 100644 --- a/zh-CN/blog/2022/02/08/how-to-user-blog/index.html +++ b/zh-CN/blog/2022/02/08/how-to-user-blog/index.html @@ -7,7 +7,7 @@ 如何编写博客 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/02/21/linkis-deploy/index.html b/zh-CN/blog/2022/02/21/linkis-deploy/index.html index 143847e2ec4..078b0bda8ca 100644 --- a/zh-CN/blog/2022/02/21/linkis-deploy/index.html +++ b/zh-CN/blog/2022/02/21/linkis-deploy/index.html @@ -7,7 +7,7 @@ Linkis 部署排障 | Apache Linkis - + @@ -78,7 +78,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/03/20/openlookeng/index.html b/zh-CN/blog/2022/03/20/openlookeng/index.html index a9f62730f4d..ff55b1b3692 100644 --- a/zh-CN/blog/2022/03/20/openlookeng/index.html +++ b/zh-CN/blog/2022/03/20/openlookeng/index.html @@ -7,7 +7,7 @@ OpenLooKeng的引擎的实现 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html b/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html index 63c563a4431..c947e2ca332 100644 --- a/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html +++ b/zh-CN/blog/2022/04/15/how-to-download-engineconn-plugin/index.html @@ -7,7 +7,7 @@ 如何下载安装包中默认没有的引擎插件 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/06/09/meetup-content-review/index.html b/zh-CN/blog/2022/06/09/meetup-content-review/index.html index 6421019625a..a8377c62ba2 100644 --- a/zh-CN/blog/2022/06/09/meetup-content-review/index.html +++ b/zh-CN/blog/2022/06/09/meetup-content-review/index.html @@ -7,7 +7,7 @@ Apache Linkis(Incubating) Meetup | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html b/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html index cec5bef4592..f423003f1c7 100644 --- a/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html +++ b/zh-CN/blog/2022/07/04/how-to-add-auto-bot/index.html @@ -7,7 +7,7 @@ 如何为github仓库添加一个github action | Apache Linkis - + @@ -37,7 +37,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html b/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html index 089e469eb7b..19c5534ff7b 100644 --- a/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html +++ b/zh-CN/blog/2022/07/16/deploy-linkis-with-kubernetes/index.html @@ -7,7 +7,7 @@ 部署Linkis到Kubernetes | Apache Linkis - + @@ -51,7 +51,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/archive/index.html b/zh-CN/blog/archive/index.html index 31d4188cbd5..abee5a91638 100644 --- a/zh-CN/blog/archive/index.html +++ b/zh-CN/blog/archive/index.html @@ -7,7 +7,7 @@ 历史博文 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/index.html b/zh-CN/blog/index.html index d00cfa14e28..a33d53542b4 100644 --- a/zh-CN/blog/index.html +++ b/zh-CN/blog/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -77,7 +77,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/page/2/index.html b/zh-CN/blog/page/2/index.html index 8cf2d5c9035..b6f616344ca 100644 --- a/zh-CN/blog/page/2/index.html +++ b/zh-CN/blog/page/2/index.html @@ -7,7 +7,7 @@ Blog | Apache Linkis - + @@ -78,7 +78,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/blog/index.html b/zh-CN/blog/tags/blog/index.html index bbd6018acb8..0a039e46f2b 100644 --- a/zh-CN/blog/tags/blog/index.html +++ b/zh-CN/blog/tags/blog/index.html @@ -7,7 +7,7 @@ 1 篇博文 含有标签「blog | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/engine/index.html b/zh-CN/blog/tags/engine/index.html index c6cac7666ba..1f7401fb4d0 100644 --- a/zh-CN/blog/tags/engine/index.html +++ b/zh-CN/blog/tags/engine/index.html @@ -7,7 +7,7 @@ 2 篇博文 含有标签「engine | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/github/index.html b/zh-CN/blog/tags/github/index.html index f69a38559a9..e4c7aee4ac5 100644 --- a/zh-CN/blog/tags/github/index.html +++ b/zh-CN/blog/tags/github/index.html @@ -7,7 +7,7 @@ 2 篇博文 含有标签「github | Apache Linkis - + @@ -70,7 +70,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/guide/index.html b/zh-CN/blog/tags/guide/index.html index 3c1f56cb942..1516bb5ac95 100644 --- a/zh-CN/blog/tags/guide/index.html +++ b/zh-CN/blog/tags/guide/index.html @@ -7,7 +7,7 @@ 2 篇博文 含有标签「guide | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/index.html b/zh-CN/blog/tags/index.html index 0c4b33f42d3..13fd46b7935 100644 --- a/zh-CN/blog/tags/index.html +++ b/zh-CN/blog/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/blog/tags/meetup/index.html b/zh-CN/blog/tags/meetup/index.html index 015f527f955..b7f9e7cebe4 100644 --- a/zh-CN/blog/tags/meetup/index.html +++ b/zh-CN/blog/tags/meetup/index.html @@ -7,7 +7,7 @@ 1 篇博文 含有标签「meetup | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/apache-product-name-usage-guide/index.html b/zh-CN/community/apache-product-name-usage-guide/index.html index 210eb2de611..262fda82649 100644 --- a/zh-CN/community/apache-product-name-usage-guide/index.html +++ b/zh-CN/community/apache-product-name-usage-guide/index.html @@ -7,7 +7,7 @@ APACHE 产品名称使用指南 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/api/index.html b/zh-CN/community/development_specification/api/index.html index 16399a8d156..14767a6f141 100644 --- a/zh-CN/community/development_specification/api/index.html +++ b/zh-CN/community/development_specification/api/index.html @@ -7,7 +7,7 @@ 接口规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/concurrent/index.html b/zh-CN/community/development_specification/concurrent/index.html index 1459153aed1..73f1a3e6551 100644 --- a/zh-CN/community/development_specification/concurrent/index.html +++ b/zh-CN/community/development_specification/concurrent/index.html @@ -7,7 +7,7 @@ 并发规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/exception_catch/index.html b/zh-CN/community/development_specification/exception_catch/index.html index 16641053efa..dcff8c28c77 100644 --- a/zh-CN/community/development_specification/exception_catch/index.html +++ b/zh-CN/community/development_specification/exception_catch/index.html @@ -7,7 +7,7 @@ 异常规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/license/index.html b/zh-CN/community/development_specification/license/index.html index 38ab4a758c3..ebaeecab9e7 100644 --- a/zh-CN/community/development_specification/license/index.html +++ b/zh-CN/community/development_specification/license/index.html @@ -7,7 +7,7 @@ License 须知 | Apache Linkis - + @@ -39,7 +39,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/log/index.html b/zh-CN/community/development_specification/log/index.html index c27063e4063..b385f93fb95 100644 --- a/zh-CN/community/development_specification/log/index.html +++ b/zh-CN/community/development_specification/log/index.html @@ -7,7 +7,7 @@ 日志规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/overview/index.html b/zh-CN/community/development_specification/overview/index.html index 63f0997ccbb..4f9eccc1dab 100644 --- a/zh-CN/community/development_specification/overview/index.html +++ b/zh-CN/community/development_specification/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/path_usage/index.html b/zh-CN/community/development_specification/path_usage/index.html index 9051e52e77a..d68becc8937 100644 --- a/zh-CN/community/development_specification/path_usage/index.html +++ b/zh-CN/community/development_specification/path_usage/index.html @@ -7,7 +7,7 @@ 路径规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/programming_specification/index.html b/zh-CN/community/development_specification/programming_specification/index.html index 523b6dab72f..2b86fe1a6ff 100644 --- a/zh-CN/community/development_specification/programming_specification/index.html +++ b/zh-CN/community/development_specification/programming_specification/index.html @@ -7,7 +7,7 @@ 编程规约 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/release-notes/index.html b/zh-CN/community/development_specification/release-notes/index.html index 2f5c3acb4d9..e5c3875ab80 100644 --- a/zh-CN/community/development_specification/release-notes/index.html +++ b/zh-CN/community/development_specification/release-notes/index.html @@ -7,7 +7,7 @@ Release-Notes 编写规范 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/unit_test/index.html b/zh-CN/community/development_specification/unit_test/index.html index faa90ca7aab..dad39348c64 100644 --- a/zh-CN/community/development_specification/unit_test/index.html +++ b/zh-CN/community/development_specification/unit_test/index.html @@ -7,7 +7,7 @@ 测试规约 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/development_specification/version_feature_specifications/index.html b/zh-CN/community/development_specification/version_feature_specifications/index.html index 55eecabdb0a..a0d8901194c 100644 --- a/zh-CN/community/development_specification/version_feature_specifications/index.html +++ b/zh-CN/community/development_specification/version_feature_specifications/index.html @@ -7,7 +7,7 @@ 版本和新特性规范 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-contribute-to-website/index.html b/zh-CN/community/how-to-contribute-to-website/index.html index 991c361fde7..12851043464 100644 --- a/zh-CN/community/how-to-contribute-to-website/index.html +++ b/zh-CN/community/how-to-contribute-to-website/index.html @@ -7,7 +7,7 @@ 如何参与官网贡献 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-contribute/index.html b/zh-CN/community/how-to-contribute/index.html index 7ab9d47f23c..8c0a4450c80 100644 --- a/zh-CN/community/how-to-contribute/index.html +++ b/zh-CN/community/how-to-contribute/index.html @@ -7,7 +7,7 @@ 如何参与项目贡献 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-email/index.html b/zh-CN/community/how-to-email/index.html index 82e07b1fc20..9be3eabd86e 100644 --- a/zh-CN/community/how-to-email/index.html +++ b/zh-CN/community/how-to-email/index.html @@ -7,7 +7,7 @@ 如何使用邮件列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-participate-in-developer-meetings/index.html b/zh-CN/community/how-to-participate-in-developer-meetings/index.html index 2180abae1fc..c82dd0acb4a 100644 --- a/zh-CN/community/how-to-participate-in-developer-meetings/index.html +++ b/zh-CN/community/how-to-participate-in-developer-meetings/index.html @@ -7,7 +7,7 @@ 如何参与开发者例会 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-release/index.html b/zh-CN/community/how-to-release/index.html index de5c2955013..480783945e0 100644 --- a/zh-CN/community/how-to-release/index.html +++ b/zh-CN/community/how-to-release/index.html @@ -7,7 +7,7 @@ 如何发布版本 | Apache Linkis - + @@ -162,7 +162,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-sign-apache-icla/index.html b/zh-CN/community/how-to-sign-apache-icla/index.html index 3e5affc43ce..3add7e18580 100644 --- a/zh-CN/community/how-to-sign-apache-icla/index.html +++ b/zh-CN/community/how-to-sign-apache-icla/index.html @@ -7,7 +7,7 @@ ICLA 签署流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-subscribe/index.html b/zh-CN/community/how-to-subscribe/index.html index eb259cc5790..850a04e6be6 100644 --- a/zh-CN/community/how-to-subscribe/index.html +++ b/zh-CN/community/how-to-subscribe/index.html @@ -7,7 +7,7 @@ 订阅邮件列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-verify/index.html b/zh-CN/community/how-to-verify/index.html index 30df0359031..de6c9b84333 100644 --- a/zh-CN/community/how-to-verify/index.html +++ b/zh-CN/community/how-to-verify/index.html @@ -7,7 +7,7 @@ 发布版本验证 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-vote-a-committer-ppmc/index.html b/zh-CN/community/how-to-vote-a-committer-ppmc/index.html index 5001b5f2ae0..5b5c559ccc7 100644 --- a/zh-CN/community/how-to-vote-a-committer-ppmc/index.html +++ b/zh-CN/community/how-to-vote-a-committer-ppmc/index.html @@ -7,7 +7,7 @@ 如何提名新的Committer 和 PPMC | Apache Linkis - + @@ -50,7 +50,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/how-to-write-unit-test-code/index.html b/zh-CN/community/how-to-write-unit-test-code/index.html index f90a89efb0b..0e6c7d65280 100644 --- a/zh-CN/community/how-to-write-unit-test-code/index.html +++ b/zh-CN/community/how-to-write-unit-test-code/index.html @@ -7,7 +7,7 @@ 如何编写单元测试代码 | Apache Linkis - + @@ -61,7 +61,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/microservice-division/index.html b/zh-CN/community/microservice-division/index.html index d3ed9324066..4345854e4f0 100644 --- a/zh-CN/community/microservice-division/index.html +++ b/zh-CN/community/microservice-division/index.html @@ -7,7 +7,7 @@ 微服务的划分 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/ppmc-related-permission-configuration/index.html b/zh-CN/community/ppmc-related-permission-configuration/index.html index f7999c7ca51..f2ca7b469ed 100644 --- a/zh-CN/community/ppmc-related-permission-configuration/index.html +++ b/zh-CN/community/ppmc-related-permission-configuration/index.html @@ -7,7 +7,7 @@ PPMC/Committer 相关权限配置 | Apache Linkis - + @@ -30,7 +30,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/security/index.html b/zh-CN/community/security/index.html index 041db75451a..18c0cae069e 100644 --- a/zh-CN/community/security/index.html +++ b/zh-CN/community/security/index.html @@ -7,7 +7,7 @@ 安全 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/community/site-map/index.html b/zh-CN/community/site-map/index.html index 499534b16f4..9e6b298101f 100644 --- a/zh-CN/community/site-map/index.html +++ b/zh-CN/community/site-map/index.html @@ -7,7 +7,7 @@ 站点地图 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/api/login_api/index.html b/zh-CN/docs/0.11.0/api/login_api/index.html index 8656730d1d0..638d530ba23 100644 --- a/zh-CN/docs/0.11.0/api/login_api/index.html +++ b/zh-CN/docs/0.11.0/api/login_api/index.html @@ -7,7 +7,7 @@ 登陆 Api | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/api/rest_api/index.html b/zh-CN/docs/0.11.0/api/rest_api/index.html index 9b8c151b530..d89dc92b474 100644 --- a/zh-CN/docs/0.11.0/api/rest_api/index.html +++ b/zh-CN/docs/0.11.0/api/rest_api/index.html @@ -7,7 +7,7 @@ Restful Api | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/api/web_socket/index.html b/zh-CN/docs/0.11.0/api/web_socket/index.html index a48d322615f..a350360232e 100644 --- a/zh-CN/docs/0.11.0/api/web_socket/index.html +++ b/zh-CN/docs/0.11.0/api/web_socket/index.html @@ -7,7 +7,7 @@ WebSocket | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html b/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html index 9ad93cebf92..0768aa1becd 100644 --- a/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html +++ b/zh-CN/docs/0.11.0/architecture/commons/real-time_log_push/index.html @@ -7,7 +7,7 @@ 异步日志实时推送 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html b/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html index de64c96b0ab..07bb24385ff 100644 --- a/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html +++ b/zh-CN/docs/0.11.0/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html b/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html index a0f3d57ca75..cee54841175 100644 --- a/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html +++ b/zh-CN/docs/0.11.0/architecture/commons/scheduler/index.html @@ -7,7 +7,7 @@ Scheduler架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/overview/index.html b/zh-CN/docs/0.11.0/architecture/overview/index.html index 04535755923..b39165bc193 100644 --- a/zh-CN/docs/0.11.0/architecture/overview/index.html +++ b/zh-CN/docs/0.11.0/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/rm/index.html b/zh-CN/docs/0.11.0/architecture/rm/index.html index 028c89b96a6..be6db0fa1c4 100644 --- a/zh-CN/docs/0.11.0/architecture/rm/index.html +++ b/zh-CN/docs/0.11.0/architecture/rm/index.html @@ -7,7 +7,7 @@ RM 设计 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html b/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html index 78c6967d0aa..f0fff10cd0a 100644 --- a/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html +++ b/zh-CN/docs/0.11.0/architecture/storage/file_system/index.html @@ -7,7 +7,7 @@ 对接多种文件系统 | Apache Linkis - + @@ -27,7 +27,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html b/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html index dd786f03014..cd4821773df 100644 --- a/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html +++ b/zh-CN/docs/0.11.0/architecture/storage/remote_file_system_architecture_design/index.html @@ -7,7 +7,7 @@ 访问远程文件系统架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html b/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html index ccb716c6e2b..791c025a1cd 100644 --- a/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html +++ b/zh-CN/docs/0.11.0/architecture/storage/resultset_file/index.html @@ -7,7 +7,7 @@ 结果集文件存储 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html b/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html index a0c5bde32f1..5688eed3946 100644 --- a/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html +++ b/zh-CN/docs/0.11.0/architecture/ujes/asynchronous_thread_pool/index.html @@ -7,7 +7,7 @@ 全异步线程池调用 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html b/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html index 44499090fb9..53f4f2e455c 100644 --- a/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html +++ b/zh-CN/docs/0.11.0/architecture/ujes/file_import_and_export_structure/index.html @@ -7,7 +7,7 @@ Spark引擎的文件导入导出 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html b/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html index 815786a572b..609b9f5269c 100644 --- a/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html +++ b/zh-CN/docs/0.11.0/architecture/ujes/ujes_design/index.html @@ -7,7 +7,7 @@ UJES设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/architecture/websocket/index.html b/zh-CN/docs/0.11.0/architecture/websocket/index.html index ddb9ec319b5..fac6bc89326 100644 --- a/zh-CN/docs/0.11.0/architecture/websocket/index.html +++ b/zh-CN/docs/0.11.0/architecture/websocket/index.html @@ -7,7 +7,7 @@ WebSocket请求转发实现 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html index 10929c974e9..4f4121811cf 100644 --- a/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/0.11.0/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 安装 EngineConnPlugin 引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html b/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html index 7d092aaceea..560224cf9d4 100644 --- a/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html +++ b/zh-CN/docs/0.11.0/deployment/production_deployment _guide/index.html @@ -7,7 +7,7 @@ 生产部署参考指南 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html b/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html index 9dabd3b1c73..1b3aa397d98 100644 --- a/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html +++ b/zh-CN/docs/0.11.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速部署 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/quick_start/index.html b/zh-CN/docs/0.11.0/deployment/quick_start/index.html index ba9a21903d3..992a209673e 100644 --- a/zh-CN/docs/0.11.0/deployment/quick_start/index.html +++ b/zh-CN/docs/0.11.0/deployment/quick_start/index.html @@ -7,7 +7,7 @@ 快速启动 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html index c0bf86e6610..cfb37806867 100644 --- a/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/0.11.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/compile_and_package/index.html b/zh-CN/docs/0.11.0/development/compile_and_package/index.html index c7e01302f1f..89d0a1ff2b1 100644 --- a/zh-CN/docs/0.11.0/development/compile_and_package/index.html +++ b/zh-CN/docs/0.11.0/development/compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 编译打包 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/install-server/index.html b/zh-CN/docs/0.11.0/development/install-server/index.html index c04b2422611..4eb968aed4c 100644 --- a/zh-CN/docs/0.11.0/development/install-server/index.html +++ b/zh-CN/docs/0.11.0/development/install-server/index.html @@ -7,7 +7,7 @@ 单个服务的安装 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/new_engine_conn/index.html b/zh-CN/docs/0.11.0/development/new_engine_conn/index.html index ad7ba238bbe..2fb834cbe5d 100644 --- a/zh-CN/docs/0.11.0/development/new_engine_conn/index.html +++ b/zh-CN/docs/0.11.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/development/start-server/index.html b/zh-CN/docs/0.11.0/development/start-server/index.html index 6ea1002bcce..67f41414908 100644 --- a/zh-CN/docs/0.11.0/development/start-server/index.html +++ b/zh-CN/docs/0.11.0/development/start-server/index.html @@ -7,7 +7,7 @@ 单个服务的启动 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/engine_usage/hive/index.html b/zh-CN/docs/0.11.0/engine_usage/hive/index.html index d08ee97b5a1..d54406a86ea 100644 --- a/zh-CN/docs/0.11.0/engine_usage/hive/index.html +++ b/zh-CN/docs/0.11.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/engine_usage/python/index.html b/zh-CN/docs/0.11.0/engine_usage/python/index.html index 0b552a45306..759d58f0480 100644 --- a/zh-CN/docs/0.11.0/engine_usage/python/index.html +++ b/zh-CN/docs/0.11.0/engine_usage/python/index.html @@ -7,7 +7,7 @@ Python引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/engine_usage/spark/index.html b/zh-CN/docs/0.11.0/engine_usage/spark/index.html index 7fab93a6506..c10d233109c 100644 --- a/zh-CN/docs/0.11.0/engine_usage/spark/index.html +++ b/zh-CN/docs/0.11.0/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark引擎 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/introduction/index.html b/zh-CN/docs/0.11.0/introduction/index.html index a64968405c1..a50173a79f1 100644 --- a/zh-CN/docs/0.11.0/introduction/index.html +++ b/zh-CN/docs/0.11.0/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/tags/index.html b/zh-CN/docs/0.11.0/tags/index.html index a4fa3162ad0..3a8e6b9afc7 100644 --- a/zh-CN/docs/0.11.0/tags/index.html +++ b/zh-CN/docs/0.11.0/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html b/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html index f6283878121..e56569ba83e 100644 --- a/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html +++ b/zh-CN/docs/0.11.0/upgrade/upgrade_from_0.9.0_to_0.9.1_guide/index.html @@ -7,7 +7,7 @@ 0.9.0 升级 0.9.1 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html b/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html index 8978b5b9452..8e11dd76969 100644 --- a/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html +++ b/zh-CN/docs/0.11.0/user_guide/1.0_sdk_manual/index.html @@ -7,7 +7,7 @@ 1.0 SDK的使用 | Apache Linkis - + @@ -60,7 +60,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html b/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html index fe6a85cd5a4..242eb223f8a 100644 --- a/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html +++ b/zh-CN/docs/0.11.0/user_guide/X_sdk_manual/index.html @@ -7,7 +7,7 @@ 0.X SDK的使用 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/jdbc_api/index.html b/zh-CN/docs/1.0.2/api/jdbc_api/index.html index 33ffcd19817..c5c25ac1fd5 100644 --- a/zh-CN/docs/1.0.2/api/jdbc_api/index.html +++ b/zh-CN/docs/1.0.2/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行JDBC API文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html b/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html index 87ad6f30b5d..91d95b0fd7f 100644 --- a/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.0.2/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/login_api/index.html b/zh-CN/docs/1.0.2/api/login_api/index.html index 697deb80712..6667cdc9327 100644 --- a/zh-CN/docs/1.0.2/api/login_api/index.html +++ b/zh-CN/docs/1.0.2/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/api/overview/index.html b/zh-CN/docs/1.0.2/api/overview/index.html index 8e47a5e2fca..611ff84d747 100644 --- a/zh-CN/docs/1.0.2/api/overview/index.html +++ b/zh-CN/docs/1.0.2/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html b/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html index 84e866d7da8..a5e0d49e4c3 100644 --- a/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.0.2/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 新增流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html b/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html index 708db2e0f00..3bc75ed48c5 100644 --- a/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/1.0.2/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html b/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html index a9429e16bb5..bc4ddf8fa19 100644 --- a/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.0.2/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html index 51ab1a80c22..875a44c8611 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html index a444e9ead2b..d224e0dd5b5 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index f64ee14fbfa..66da8028174 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html index 3fdc6af1905..5abe220e4f2 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html index a21bd22fe40..6b259c14755 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 0fb49945c7a..beb2d79a4fc 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html index bc3d148d07d..eb28b0b6ea5 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html index b604174df33..674aa9b672e 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index a78327df6ab..d02144c12ea 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html index 95196639e17..00cf8d27a7a 100644 --- a/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html index 3d449041524..1b621649113 100644 --- a/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.0.2/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html index 1bd7d973eb4..a4874080002 100644 --- a/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.0.2/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job 提交准备执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html index 2e9abb84b3c..cf346174150 100644 --- a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html index 213734d529a..99051b6189e 100644 --- a/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/overview/index.html b/zh-CN/docs/1.0.2/architecture/overview/index.html index 6c9f5fad343..04ee0c47183 100644 --- a/zh-CN/docs/1.0.2/architecture/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html index 9888033a9e6..b29dc817d8c 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML 物料库架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html index ec8af321da5..74837240b3b 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 5f097e87a70..5872f24211b 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html index be110013830..3080a6670e8 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 6ab1a218ae2..e983c1ac9ff 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 7d2fa0a1b96..0653df32298 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index 74f7516fce1..0b07c4ed427 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html index 8accb87793b..fe5e86df70e 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html index 1bf16f6541e..c8e46f54c0c 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html index 02acac3512e..ca97edef3ac 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html index 4cc7e3d61cd..5f17ead5fc0 100644 --- a/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.0.2/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/contact/index.html b/zh-CN/docs/1.0.2/contact/index.html index 97a235c726e..a4f26b80be6 100644 --- a/zh-CN/docs/1.0.2/contact/index.html +++ b/zh-CN/docs/1.0.2/contact/index.html @@ -7,7 +7,7 @@ 联系我们 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html b/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html index cc0bf48dd05..ec7e4f75c80 100644 --- a/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.0.2/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html index 5ddabc54f46..ddaea80fdeb 100644 --- a/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.0.2/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 安装 EngineConnPlugin 引擎 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html index 90db58949d6..50ccb107e7e 100644 --- a/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.0.2/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html b/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html index 5156069ed41..a6ecfc7ec75 100644 --- a/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.0.2/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速部署 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html index 2075fe977e3..072d3e22265 100644 --- a/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.0.2/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/deployment/web_install/index.html b/zh-CN/docs/1.0.2/deployment/web_install/index.html index 8b6735871c6..9064c09a061 100644 --- a/zh-CN/docs/1.0.2/deployment/web_install/index.html +++ b/zh-CN/docs/1.0.2/deployment/web_install/index.html @@ -7,7 +7,7 @@ 前端管理台部署 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html index d56cbfba2e2..367b4ba9538 100644 --- a/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.0.2/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 编译打包 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/linkis_debug/index.html b/zh-CN/docs/1.0.2/development/linkis_debug/index.html index 014203aa716..96011f0096b 100644 --- a/zh-CN/docs/1.0.2/development/linkis_debug/index.html +++ b/zh-CN/docs/1.0.2/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 调试指引 | Apache Linkis - + @@ -49,7 +49,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/new_engine_conn/index.html b/zh-CN/docs/1.0.2/development/new_engine_conn/index.html index a2b18b9190f..77178a78d19 100644 --- a/zh-CN/docs/1.0.2/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.0.2/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/development/web_build/index.html b/zh-CN/docs/1.0.2/development/web_build/index.html index e66c515894f..0f249f81c25 100644 --- a/zh-CN/docs/1.0.2/development/web_build/index.html +++ b/zh-CN/docs/1.0.2/development/web_build/index.html @@ -7,7 +7,7 @@ 前端管理台编译 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/hive/index.html b/zh-CN/docs/1.0.2/engine_usage/hive/index.html index f9724a2f4f6..2dd8ab536dc 100644 --- a/zh-CN/docs/1.0.2/engine_usage/hive/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html b/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html index 4360ec93fd7..1085290f1cd 100644 --- a/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/overview/index.html b/zh-CN/docs/1.0.2/engine_usage/overview/index.html index 6f3da574846..8c15a4523ab 100644 --- a/zh-CN/docs/1.0.2/engine_usage/overview/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/python/index.html b/zh-CN/docs/1.0.2/engine_usage/python/index.html index a5ae32c08d7..fb1e84b0fd0 100644 --- a/zh-CN/docs/1.0.2/engine_usage/python/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/python/index.html @@ -7,21 +7,21 @@ Python 引擎 | Apache Linkis - +
    -
Version: 1.0.2

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.0.

1. Environment configuration before using the Spark engine

If you want to use the python engine on your server, make sure the executing user's PATH contains the python executable directory and that the user has execution permission.

Environment variable | Value                        | Remarks
python               | python execution environment | Anaconda's python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Choosing and compiling the Python version

Python supports both python2 and +

Version: 1.0.2

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.0.

1. Environment configuration before using the Python engine

If you want to use the python engine on your server, make sure the executing user's PATH contains the python executable directory and that the user has execution permission.

Environment variable | Value                        | Remarks
python               | python execution environment | Anaconda's python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Choosing and compiling the Python version

Python supports both python2 and python3. You can switch between Python versions with a simple configuration change; there is no need to recompile the python engine.

2.2 python engineConn deployment and loading

The default loading method can be used here and the engine will work normally.

2.3 Labels of the python engine

The default dml.sql can be used for the insertion here and the engine will work normally.

3. Using the Python engine

Preparations

Before submitting python code on linkis, you only need to make sure the executing user's $PATH contains the path to python.

3.1 Using the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. For Python tasks, you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType
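
To make the Demo modification concrete, here is a minimal, self-contained sketch of assembling the label map for a Python task. It is illustrative only: the plain string keys (engineType, userCreator, codeType) are assumed stand-ins for the LabelKeyConstant values above, and python-python3 is an assumed engine type for a python3 deployment; verify both against your Linkis version.

        import java.util.HashMap;
        import java.util.Map;

        public class PythonTaskLabels {
            public static void main(String[] args) {
                // Label map for a Python task, mirroring the LabelKeyConstant-based
                // snippet above; the string keys here are assumptions, not confirmed constants.
                Map<String, Object> labels = new HashMap<>();
                labels.put("engineType", "python-python2"); // engine type label from this page
                labels.put("userCreator", "hadoop-IDE");    // executing user and creator
                labels.put("codeType", "python");           // script type

                // Assumption: a python3 deployment would only swap the engineType label:
                // labels.put("engineType", "python-python3");

                System.out.println(labels);
            }
        }

The assembled map is then handed to the task submission call of the JAVA SDK Manual demo in place of its original labels.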

3.2 Submitting tasks via Linkis-cli

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Python usage is as follows:

sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")"  -submitUser hadoop -proxyUser hadoop

For detailed usage, refer to the Linkis CLI Manual.

3.3 Using Scriptis

Using Scriptis is the simplest way: go directly into Scriptis, right-click a directory, create a new python script, write your python code, and click execute.

The execution logic of python works via Py4j: a python gateway is started, and the Python engine then submits the code to the python executor for execution.

Figure 3-1 Screenshot of python execution

4. User settings of the Python engine

In addition to the engine configuration above, users can also apply custom settings, such as the python version and the modules python needs to load.

Figure 4-1 Console for user-defined python configuration

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/shell/index.html b/zh-CN/docs/1.0.2/engine_usage/shell/index.html index 06ce3f079c5..0a70bb3d366 100644 --- a/zh-CN/docs/1.0.2/engine_usage/shell/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/engine_usage/spark/index.html b/zh-CN/docs/1.0.2/engine_usage/spark/index.html index 989851f9a06..883d9c58518 100644 --- a/zh-CN/docs/1.0.2/engine_usage/spark/index.html +++ b/zh-CN/docs/1.0.2/engine_usage/spark/index.html @@ -7,15 +7,15 @@ Spark 引擎 | Apache Linkis - +
Version: 1.0.2

Spark Engine

This article mainly introduces the configuration, deployment, and usage of the spark engine in Linkis 1.0.

1. Environment configuration before using the Spark engine

If you want to use the spark engine on your server, make sure the following environment variables are set correctly and that the engine's startup user has them.

It is strongly recommended that you check these environment variables of the executing user before running spark tasks.

Environment variable | Value                      | Remarks
JAVA_HOME            | JDK installation path      | Required
HADOOP_HOME          | Hadoop installation path   | Required
HADOOP_CONF_DIR      | Hadoop configuration path  | Required
HIVE_CONF_DIR        | Hive configuration path    | Required
SPARK_HOME           | Spark installation path    | Required
SPARK_CONF_DIR       | Spark configuration path   | Required
python               | python                     | Anaconda's python is recommended as the default python

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Spark engine

2.1 Choosing and compiling the spark version

In theory, Linkis 1.0 supports all versions of spark 2.x and above. The default supported version is Spark2.4.3. If you want to use another spark version, such as spark2.1.0, you only need to change the spark version of the plugin and recompile it. Specifically, find the linkis-engineplugin-spark module, change the <spark.version> tag to 2.1.0, and compile this module separately.

2.2 spark engineConn deployment and loading

If you have finished compiling your spark engine plugin, you need to place the new plugin in the specified location for it to be loaded. For details, refer to the following article:

EngineConnPlugin engine plugin installation

2.3 Labels of the spark engine

Linkis 1.0 routes execution through labels, so label data needs to be inserted into the database. The insertion method is described in:

EngineConnPlugin engine plugin installation > 2.2 Configuration changes in the management console (optional)

3. Using the spark engine

Preparation: queue setup

Because spark execution requires queue resources, users must set a queue they are allowed to use before executing.

Figure 3-1 Queue setup
You can also add the queue value in the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")
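
As a concrete illustration of this preparation step, here is a minimal, self-contained sketch of building such a StartUpMap. Only the key wds.linkis.rm.yarnqueue and the example queue name dws come from this page; the surrounding class is illustrative scaffolding, and dws should be replaced with a queue your user can actually use.

        import java.util.HashMap;
        import java.util.Map;

        public class SparkStartupParams {
            public static void main(String[] args) {
                // Startup parameters submitted alongside the task; the key and the
                // example queue name "dws" are taken from this page.
                Map<String, Object> startupMap = new HashMap<>();
                startupMap.put("wds.linkis.rm.yarnqueue", "dws");

                System.out.println(startupMap); // {wds.linkis.rm.yarnqueue=dws}
            }
        }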

3.1 Using the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. -For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala

3.2 Submitting tasks via Linkis-cli

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Spark usage is as follows:

You can also add the queue value in the StartUpMap of the submission parameter: `startupMap.put("wds.linkis.rm.yarnqueue", "dws")`
-

For detailed usage, refer to the Linkis CLI Manual.

3.3 Using Scriptis

Using Scriptis is the simplest way: go directly into Scriptis and create a new sql, scala, or pyspark script to execute.

sql is the simplest way: you can create a new sql script, write it, and execute it; progress is displayed during execution. If the user has no spark engine at first, executing sql will start a spark session (which may take some time), +For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala
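
For a pyspark or scala task, only the codeType label value changes (py, sql, or scala, per the comment above). A minimal sketch under the same assumption as before, namely that the plain string keys mirror the LabelKeyConstant values:

        import java.util.HashMap;
        import java.util.Map;

        public class SparkTaskLabels {
            public static void main(String[] args) {
                // Labels for a pyspark task; switch "py" to "sql" or "scala"
                // for SparkSQL or Spark scala tasks respectively.
                Map<String, Object> labels = new HashMap<>();
                labels.put("engineType", "spark-2.4.3");
                labels.put("userCreator", "hadoop-IDE");
                labels.put("codeType", "py");

                System.out.println(labels);
            }
        }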

3.2 Submitting tasks via Linkis-cli

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Spark usage is as follows:

# codeType correspondence: py --> pyspark, sql --> sparkSQL, scala --> Spark scala
sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables"  -submitUser hadoop -proxyUser hadoop
+# The yarn queue can be specified in the submission parameters via -confMap wds.linkis.yarnqueue=dws
sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql  -confMap wds.linkis.yarnqueue=dws -code "show tables"  -submitUser hadoop -proxyUser hadoop

For detailed usage, refer to the Linkis CLI Manual.

3.3 Using Scriptis

Using Scriptis is the simplest way: go directly into Scriptis and create a new sql, scala, or pyspark script to execute.

sql is the simplest way: you can create a new sql script, write it, and execute it; progress is displayed during execution. If the user has no spark engine at first, executing sql will start a spark session (which may take some time). After the SparkSession is initialized, sql execution can begin.

Figure 3-2 Screenshot of sparksql execution

For spark-scala tasks, variables such as sqlContext are already initialized, and users can use this sqlContext directly to execute sql.

Figure 3-3 Screenshot of spark-scala execution

Similarly, in the pyspark mode, the SparkSession is already initialized, and users can execute sql directly via spark.sql.

Figure 3-4 pyspark execution mode

4. User settings of the spark engine

In addition to the engine configuration above, users can also apply custom settings, such as the number of executors and the executor memory of the spark session. These parameters let users configure their own spark parameters more freely; other spark parameters can also be modified, such as the python version for pyspark.

Figure 4-1 Console for user-defined spark configuration

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/introduction/index.html b/zh-CN/docs/1.0.2/introduction/index.html index 8717bbbf916..e008c9c31ac 100644 --- a/zh-CN/docs/1.0.2/introduction/index.html +++ b/zh-CN/docs/1.0.2/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tags/index.html b/zh-CN/docs/1.0.2/tags/index.html index c03f6824170..e2f2427ef73 100644 --- a/zh-CN/docs/1.0.2/tags/index.html +++ b/zh-CN/docs/1.0.2/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html index 9722a82dbf4..4c4ffa45396 100644 --- a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html index 124a4111423..a4a18fb385f 100644 --- a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html index 6485d6c39d1..39621957a64 100644 --- a/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/1.0.2/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/upgrade/overview/index.html b/zh-CN/docs/1.0.2/upgrade/overview/index.html index 0de1648318d..a9c1a901a5a 100644 --- a/zh-CN/docs/1.0.2/upgrade/overview/index.html +++ b/zh-CN/docs/1.0.2/upgrade/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index ce7b08db68e..792fba9f1c8 100644 --- a/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/1.0.2/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 1.0升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/console_manual/index.html b/zh-CN/docs/1.0.2/user_guide/console_manual/index.html index 0555d6d5621..83ac3aa1ea0 100644 --- a/zh-CN/docs/1.0.2/user_guide/console_manual/index.html +++ b/zh-CN/docs/1.0.2/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html b/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html index 64913b152a0..4b70c1323b8 100644 --- a/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html +++ b/zh-CN/docs/1.0.2/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html index 6a05f14e361..273e6843757 100644 --- a/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.0.2/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/overview/index.html b/zh-CN/docs/1.0.2/user_guide/overview/index.html index d4b340e33ac..96920376326 100644 --- a/zh-CN/docs/1.0.2/user_guide/overview/index.html +++ b/zh-CN/docs/1.0.2/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html b/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html index 01f641a9fa1..41145205654 100644 --- a/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.0.2/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -49,7 +49,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/jdbc_api/index.html b/zh-CN/docs/1.0.3/api/jdbc_api/index.html index 73014403154..1b1b70fedb3 100644 --- a/zh-CN/docs/1.0.3/api/jdbc_api/index.html +++ b/zh-CN/docs/1.0.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html b/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html index ea5032e7f6c..8d1c7b74311 100644 --- a/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.0.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/login_api/index.html b/zh-CN/docs/1.0.3/api/login_api/index.html index b8bf111b167..9ebd0dad5c9 100644 --- a/zh-CN/docs/1.0.3/api/login_api/index.html +++ b/zh-CN/docs/1.0.3/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/api/overview/index.html b/zh-CN/docs/1.0.3/api/overview/index.html index 52a68c483a2..de1f0550be7 100644 --- a/zh-CN/docs/1.0.3/api/overview/index.html +++ b/zh-CN/docs/1.0.3/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html b/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html index 0ac9f4f347d..1098ffafb1a 100644 --- a/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.0.3/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 新增流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html b/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html index 5348dc1354f..784c69ec712 100644 --- a/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/1.0.3/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html b/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html index e9c7030807f..1dcae0c5911 100644 --- a/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.0.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html index 47597b90419..c48aed4cd98 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 5be0fb960a6..e9caf542f0c 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index d6186e5fefd..8930b93306e 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html index dc6e57500d3..72e689acb58 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html index c4bec61bc04..632f2e35b99 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index dd890036f90..2c6e7c8e69d 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 02b83e99af7..327dda7e300 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html index a55557faf7c..5484f20af84 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index ea2dfd21cb0..eb252872e57 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html index b03835e532f..4c475bd37d4 100644 --- a/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html index 96ab6b28d57..ff0f9d687d6 100644 --- a/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.0.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html index b6f7bc7af60..8d642271e60 100644 --- a/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.0.3/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job 提交准备执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html index 88176f79f30..d8a4986f661 100644 --- a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html index 9a5aae051d6..391b34e5582 100644 --- a/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/overview/index.html b/zh-CN/docs/1.0.3/architecture/overview/index.html index 1b34162d3ef..c407701a45d 100644 --- a/zh-CN/docs/1.0.3/architecture/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html index e48fce29f5f..b4eb0c26a23 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/bml/index.html @@ -7,7 +7,7 @@ BML 物料库架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html index 30a2e20b404..18fdce8641c 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index eed2990ca4d..e6b73e94d76 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index 618abf87e3d..84ae57700b4 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index db6b796baa2..17df8e04677 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index eecc6d9e9df..1590903b7cb 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index eea66a4596b..9c621c3a222 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html index 6fb82682866..9f13989ab1d 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html index 1901b523581..51141389831 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/overview/index.html index 4ba9e1a1413..a6d5d36e532 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html index ccb9384f8d2..a64f225419c 100644 --- a/zh-CN/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.0.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/contact/index.html b/zh-CN/docs/1.0.3/contact/index.html index 49301377bcb..6d095b2f63e 100644 --- a/zh-CN/docs/1.0.3/contact/index.html +++ b/zh-CN/docs/1.0.3/contact/index.html @@ -7,7 +7,7 @@ 联系我们 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/deployment/cluster_deployment/index.html b/zh-CN/docs/1.0.3/deployment/cluster_deployment/index.html index 2496dff5b82..18dd0a67c9e 100644 --- a/zh-CN/docs/1.0.3/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.0.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html index bc199374bd9..29ad84acacc 100644 --- a/zh-CN/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.0.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 安装 EngineConnPlugin 引擎 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.0.3/deployment/installation_hierarchical_structure/index.html index d03b45114f0..2fb5741c0f0 100644 --- a/zh-CN/docs/1.0.3/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.0.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/deployment/quick_deploy/index.html b/zh-CN/docs/1.0.3/deployment/quick_deploy/index.html index 841b9c5e89f..9d7586e24d1 100644 --- a/zh-CN/docs/1.0.3/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.0.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html index 2268e1b3ef6..772e802f7cf 100644 --- a/zh-CN/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.0.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/deployment/web_install/index.html b/zh-CN/docs/1.0.3/deployment/web_install/index.html index 768f7dc0f98..0af0a6d6347 100644 --- a/zh-CN/docs/1.0.3/deployment/web_install/index.html +++ b/zh-CN/docs/1.0.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ 前端管理台部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.0.3/development/linkis_compile_and_package/index.html index 7287f6cfef8..6f5dd57844f 100644 --- a/zh-CN/docs/1.0.3/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.0.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 编译打包 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/development/linkis_debug/index.html b/zh-CN/docs/1.0.3/development/linkis_debug/index.html index 5765027d303..c0457f1569c 100644 --- a/zh-CN/docs/1.0.3/development/linkis_debug/index.html +++ b/zh-CN/docs/1.0.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 调试指引 | Apache Linkis - + @@ -49,7 +49,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/development/new_engine_conn/index.html b/zh-CN/docs/1.0.3/development/new_engine_conn/index.html index 3685d6c6ef9..ce43bba1b87 100644 --- a/zh-CN/docs/1.0.3/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.0.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -56,7 +56,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/development/springmvc-replaces-jersey/index.html b/zh-CN/docs/1.0.3/development/springmvc-replaces-jersey/index.html index b35bd03a924..10207021a9d 100644 --- a/zh-CN/docs/1.0.3/development/springmvc-replaces-jersey/index.html +++ b/zh-CN/docs/1.0.3/development/springmvc-replaces-jersey/index.html @@ -7,7 +7,7 @@ SpringMVC 替换 Jersey 分享 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/development/web_build/index.html b/zh-CN/docs/1.0.3/development/web_build/index.html index b7b57572ef8..7b69834fffe 100644 --- a/zh-CN/docs/1.0.3/development/web_build/index.html +++ b/zh-CN/docs/1.0.3/development/web_build/index.html @@ -7,7 +7,7 @@ 前端管理台编译 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/flink/index.html b/zh-CN/docs/1.0.3/engine_usage/flink/index.html index 26908b0851f..a4141c85d07 100644 --- a/zh-CN/docs/1.0.3/engine_usage/flink/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/hive/index.html b/zh-CN/docs/1.0.3/engine_usage/hive/index.html index b8868c33fcb..a639273ccb4 100644 --- a/zh-CN/docs/1.0.3/engine_usage/hive/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/jdbc/index.html b/zh-CN/docs/1.0.3/engine_usage/jdbc/index.html index 03f2916a967..15f912f1c10 100644 --- a/zh-CN/docs/1.0.3/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/overview/index.html b/zh-CN/docs/1.0.3/engine_usage/overview/index.html index 3c5c2f337be..87efccdae2a 100644 --- a/zh-CN/docs/1.0.3/engine_usage/overview/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/python/index.html b/zh-CN/docs/1.0.3/engine_usage/python/index.html index da7ecdc1981..388474d78ae 100644 --- a/zh-CN/docs/1.0.3/engine_usage/python/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/python/index.html @@ -7,21 +7,21 @@ Python 引擎 | Apache Linkis - +
    -
    Version: 1.0.3

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.0.

1. Environment configuration before using the Spark engine

If you want to use the Python engine on your server, you need to make sure that the Python executable directory is on the user's PATH and that the user has execute permission.

Environment variable name | Content | Remark
python | the Python execution environment | Anaconda's Python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Python version selection and compilation

Python supports both python2 and +

    Version: 1.0.3

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.0.

1. Environment configuration before using the Python engine

If you want to use the Python engine on your server, you need to make sure that the Python executable directory is on the user's PATH and that the user has execute permission.

Environment variable name | Content | Remark
python | the Python execution environment | Anaconda's Python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Python version selection and compilation

The Python engine supports both python2 and python3. You can switch the Python version with a simple configuration change; there is no need to recompile the Python engine.
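As an illustration, the switch could be made per job through the submission startup parameters in the style of the SDK demo below; note that the key name python.version is an assumption inferred from the configurable items mentioned in section 4, so verify it against your console:

    Map<String, Object> startupMap = new HashMap<String, Object>();
    // Assumed key name, not confirmed by this guide; check the console's
    // Python engine configuration page for the exact key.
    startupMap.put("python.version", "python3");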

2.2 Deploying and loading the python engineConn

The default loading method can be used here; no changes are needed.

2.3 Labels of the Python engine

The default dml.sql can be used for the insertion here; no changes are needed.

3. Using the Python engine

Preparations

Before submitting Python code on Linkis, you only need to make sure that python is on your user's $PATH.

3.1 Using the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, see the JAVA SDK Manual. For Python tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType
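To show where this label map ends up, here is a rough sketch of a full submission in the style of the JAVA SDK Manual's demo; the builder and client names below are assumptions drawn from that demo, not verified API, so check them against the manual before use:

    // Sketch only: JobSubmitAction/JobSubmitResult and the client call are assumed
    // from the JAVA SDK Manual demo; verify them against the manual before use.
    JobSubmitAction jobSubmitAction = JobSubmitAction.builder()
            .addExecuteCode("print(\"hello\")") // the Python code to execute
            .setUser("hadoop")                  // submit user
            .addExecuteUser("hadoop")           // execute user
            .setLabels(labels)                  // the label map built above
            .build();
    JobSubmitResult result = client.submit(jobSubmitAction); // client: a configured Linkis client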

3.2 Submitting tasks through Linkis-cli

Since Linkis 1.0, tasks can also be submitted via the cli: you only need to specify the corresponding EngineConn and CodeType label types. For Python:

    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")"  -submitUser hadoop -proxyUser hadoop

For details, see the Linkis CLI Manual.

3.3 Using Scriptis

Scriptis is the simplest way to use the engine: go straight into Scriptis, right-click a directory, create a new python script, write your Python code, and click execute.

Python execution works via Py4j: a python gateway is started, and the Python engine then submits the code to the Python executor for execution.
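To make the Py4j pattern concrete, the sketch below shows the canonical py4j Java-side gateway (the class name PythonEngineGateway is hypothetical; this illustrates the mechanism, not the engine's actual source). The companion Python process connects to this gateway through py4j's JavaGateway:

    import py4j.GatewayServer;

    // Hypothetical illustration of the Py4j mechanism described above.
    public class PythonEngineGateway {
        public static void main(String[] args) {
            // Expose an entry point over a py4j gateway; the Python side connects
            // to it so code and results can flow between the two processes.
            GatewayServer server = new GatewayServer(new PythonEngineGateway());
            server.start();
        }
    }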

Figure 3-1 Screenshot of Python execution

4. User settings of the Python engine

Beyond the engine configuration above, users can also customize settings, such as the Python version and the modules Python should load.

Figure 4-1 Console for user-defined Python configuration

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/shell/index.html b/zh-CN/docs/1.0.3/engine_usage/shell/index.html index 5ae04fb851b..e7ae9bd440e 100644 --- a/zh-CN/docs/1.0.3/engine_usage/shell/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/engine_usage/spark/index.html b/zh-CN/docs/1.0.3/engine_usage/spark/index.html index dc6977027ae..56125c3e904 100644 --- a/zh-CN/docs/1.0.3/engine_usage/spark/index.html +++ b/zh-CN/docs/1.0.3/engine_usage/spark/index.html @@ -7,15 +7,15 @@ Spark 引擎 | Apache Linkis - +
    Version: 1.0.3

Spark Engine

This article mainly introduces the configuration, deployment, and usage of the Spark engine in Linkis 1.0.

1. Environment configuration before using the Spark engine

If you want to use the Spark engine on your server, make sure the following environment variables are set correctly and are available to the user who starts the engine.

It is strongly recommended to check these environment variables for the executing user before running any Spark task.

Environment variable name | Content | Remark
JAVA_HOME | JDK installation path | required
HADOOP_HOME | Hadoop installation path | required
HADOOP_CONF_DIR | Hadoop configuration path | required
HIVE_CONF_DIR | Hive configuration path | required
SPARK_HOME | Spark installation path | required
SPARK_CONF_DIR | Spark configuration path | required
python | python | Anaconda's Python is recommended as the default python

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Spark engine

2.1 Spark version selection and compilation

In theory, Linkis 1.0 supports every Spark version above 2.x; the default supported version is Spark 2.4.3. To use your own Spark version, such as 2.1.0, you only need to change the plugin's Spark version and recompile: locate the linkis-engineplugin-spark module, change the <spark.version> tag to 2.1.0, and compile this module separately.

2.2 Deploying and loading the spark engineConn

Once your Spark engine plugin has been compiled, you need to place the new plugin in the designated location before it can be loaded; for details, see:

EngineConnPlugin installation

2.3 Labels of the Spark engine

Linkis 1.0 routes requests through labels, so the label data needs to be inserted into our database; the insertion method is described in:

EngineConnPlugin installation > 2.2 Console Configuration changes (optional)

3. Using the Spark engine

Preparation: queue setup

Because Spark execution requires queue resources, users must set a queue they are allowed to use before executing tasks.

Figure 3-1 Queue setup. You can also set the queue by adding it to the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")
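For example, a minimal sketch of building that StartUpMap in the style of the SDK demo below, using the queue key documented above (dws is just the example queue name):

    Map<String, Object> startupMap = new HashMap<String, Object>();
    startupMap.put("wds.linkis.rm.yarnqueue", "dws"); // the YARN queue this user is allowed to use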

3.1 Using the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, see the JAVA SDK Manual. For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType: py, sql, scala

3.2 Submitting tasks through Linkis-cli

Since Linkis 1.0, tasks can also be submitted via the cli: you only need to specify the corresponding EngineConn and CodeType label types. For Spark:

    You can also add the queue value in the StartUpMap of the submission parameter: `startupMap.put("wds.linkis.rm.yarnqueue", "dws")`
    -

For details, see the Linkis CLI Manual.

3.3 Using Scriptis

Scriptis is the simplest way to use the engine: go straight into Scriptis and create a new sql, scala, or pyspark script to execute.

The sql mode is the simplest: create a sql script, write your queries, and execute; progress is displayed during execution. If the user does not have a Spark engine yet, executing sql starts a Spark session (which may take some time), +For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType: py, sql, scala
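For instance, to submit a PySpark or Scala task with the same demo, only the codeType label changes, following the py/sql/scala mapping noted in the comment above:

    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "py");    // py --> pyspark
    // labels.put(LabelKeyConstant.CODE_TYPE_KEY, "scala"); // scala --> Spark scala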

3.2 Submitting tasks through Linkis-cli

Since Linkis 1.0, tasks can also be submitted via the cli: you only need to specify the corresponding EngineConn and CodeType label types. For Spark:

    ## codeType mapping: py --> pyspark, sql --> sparkSQL, scala --> Spark scala
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop
    # The yarn queue can be specified in the submission parameters via -confMap wds.linkis.yarnqueue=dws
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop

For details, see the Linkis CLI Manual.

3.3 Using Scriptis

Scriptis is the simplest way to use the engine: go straight into Scriptis and create a new sql, scala, or pyspark script to execute.

The sql mode is the simplest: create a sql script, write your queries, and execute; progress is displayed during execution. If the user does not have a Spark engine yet, executing sql starts a Spark session (which may take some time); once the SparkSession is initialized, the sql starts executing.

Figure 3-2 Screenshot of sparksql execution

For spark-scala tasks, variables such as sqlContext are already initialized, and users can run sql directly through this sqlContext.

Figure 3-3 Screenshot of spark-scala execution

Similarly, in the pyspark mode the SparkSession is already initialized, and users can run sql directly via spark.sql.

Figure 3-4 The pyspark execution mode

4. User settings of the Spark engine

Beyond the engine configuration above, users can also customize settings, such as the number of executors and the executor memory of the Spark session. These parameters let users tune their Spark settings more freely; other Spark parameters can also be modified, such as the Python version used by pyspark.

Figure 4-1 Console for user-defined Spark configuration

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/introduction/index.html b/zh-CN/docs/1.0.3/introduction/index.html index 2ba83e4c017..59903fd91bf 100644 --- a/zh-CN/docs/1.0.3/introduction/index.html +++ b/zh-CN/docs/1.0.3/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/tags/index.html b/zh-CN/docs/1.0.3/tags/index.html index b1c0eb6b17d..192cc34a2bb 100644 --- a/zh-CN/docs/1.0.3/tags/index.html +++ b/zh-CN/docs/1.0.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html index 04778f19032..5d91bb72bd8 100644 --- a/zh-CN/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/1.0.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/1.0.3/tuning_and_troubleshooting/overview/index.html index a48d25cd8f3..6fc2c09755f 100644 --- a/zh-CN/docs/1.0.3/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/1.0.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html index 8c7459174a7..41f49c6b4de 100644 --- a/zh-CN/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/1.0.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/upgrade/overview/index.html b/zh-CN/docs/1.0.3/upgrade/overview/index.html index 630fb29fa74..562019a3b90 100644 --- a/zh-CN/docs/1.0.3/upgrade/overview/index.html +++ b/zh-CN/docs/1.0.3/upgrade/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index 950a901c886..10fed47a84c 100644 --- a/zh-CN/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/1.0.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 1.0升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/user_guide/console_manual/index.html b/zh-CN/docs/1.0.3/user_guide/console_manual/index.html index bf73597aa3c..c08bc4ff8da 100644 --- a/zh-CN/docs/1.0.3/user_guide/console_manual/index.html +++ b/zh-CN/docs/1.0.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/user_guide/how_to_use/index.html b/zh-CN/docs/1.0.3/user_guide/how_to_use/index.html index 0d0c20b74e9..4a78ddd5920 100644 --- a/zh-CN/docs/1.0.3/user_guide/how_to_use/index.html +++ b/zh-CN/docs/1.0.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.0.3/user_guide/linkiscli_manual/index.html index b6e6db83c06..7e71390933d 100644 --- a/zh-CN/docs/1.0.3/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.0.3/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/user_guide/overview/index.html b/zh-CN/docs/1.0.3/user_guide/overview/index.html index f7e25b4d529..184d50394a3 100644 --- a/zh-CN/docs/1.0.3/user_guide/overview/index.html +++ b/zh-CN/docs/1.0.3/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.0.3/user_guide/sdk_manual/index.html b/zh-CN/docs/1.0.3/user_guide/sdk_manual/index.html index fe8cf9d6292..a2a0fe29590 100644 --- a/zh-CN/docs/1.0.3/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.0.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/api/http/data-source-manager-api/index.html b/zh-CN/docs/1.1.0/api/http/data-source-manager-api/index.html index 71ddee26128..2f41c339830 100644 --- a/zh-CN/docs/1.1.0/api/http/data-source-manager-api/index.html +++ b/zh-CN/docs/1.1.0/api/http/data-source-manager-api/index.html @@ -7,7 +7,7 @@ 数据源接口 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/api/http/metadatamanager-api/index.html b/zh-CN/docs/1.1.0/api/http/metadatamanager-api/index.html index 5ade322a8a9..6abef6f5782 100644 --- a/zh-CN/docs/1.1.0/api/http/metadatamanager-api/index.html +++ b/zh-CN/docs/1.1.0/api/http/metadatamanager-api/index.html @@ -7,7 +7,7 @@ 元数据查询接口 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/api/jdbc_api/index.html b/zh-CN/docs/1.1.0/api/jdbc_api/index.html index 5c75d3f7d54..dc45e7328f1 100644 --- a/zh-CN/docs/1.1.0/api/jdbc_api/index.html +++ b/zh-CN/docs/1.1.0/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/api/linkis_task_operator/index.html b/zh-CN/docs/1.1.0/api/linkis_task_operator/index.html index f35019cad52..cdff63e14cb 100644 --- a/zh-CN/docs/1.1.0/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.1.0/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/api/login_api/index.html b/zh-CN/docs/1.1.0/api/login_api/index.html index 8e77ad8dc4f..e437f4e8691 100644 --- a/zh-CN/docs/1.1.0/api/login_api/index.html +++ b/zh-CN/docs/1.1.0/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/api/overview/index.html b/zh-CN/docs/1.1.0/api/overview/index.html index 82aeb6a5588..adb9b021990 100644 --- a/zh-CN/docs/1.1.0/api/overview/index.html +++ b/zh-CN/docs/1.1.0/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/add_an_engine_conn/index.html b/zh-CN/docs/1.1.0/architecture/add_an_engine_conn/index.html index a467d18ceb0..3ad3ac86f38 100644 --- a/zh-CN/docs/1.1.0/architecture/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.1.0/architecture/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 新增流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/commons/message_scheduler/index.html b/zh-CN/docs/1.1.0/architecture/commons/message_scheduler/index.html index 6972b666dbb..a1722305e5e 100644 --- a/zh-CN/docs/1.1.0/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/1.1.0/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/commons/rpc/index.html b/zh-CN/docs/1.1.0/architecture/commons/rpc/index.html index 467b0c50139..e057d0c1a5c 100644 --- a/zh-CN/docs/1.1.0/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.1.0/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html index 8e4c24a4ef2..446c8ac8149 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 97cd480bb37..161076ff185 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 3f18d6548e1..83f202380b8 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/entrance/index.html index 49a8130ad7e..5b57e841f93 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html index b7d3c566123..85f2fe3d698 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html index bdb13ab1870..dd2405d2230 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 40105d5af42..ba8f2eab6fb 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html index 16ad4d51748..bbd93e6aa2e 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 54d4cc2d896..3ac1eb9223a 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.1.0/architecture/computation_governance_services/overview/index.html index f37b313f8d0..36259d12977 100644 --- a/zh-CN/docs/1.1.0/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html index d05219e621e..fb0ccdb1dec 100644 --- a/zh-CN/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.1.0/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html index b2ccaf9d521..1eea3175921 100644 --- a/zh-CN/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.1.0/architecture/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Job 提交准备执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html index 99e1fb53bcd..e53355429ba 100644 --- a/zh-CN/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.1.0/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.1.0/architecture/microservice_governance_services/overview/index.html index dd9fa1eb2c3..ef4d1693381 100644 --- a/zh-CN/docs/1.1.0/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/overview/index.html b/zh-CN/docs/1.1.0/architecture/overview/index.html index 45a930eb390..a05025162d5 100644 --- a/zh-CN/docs/1.1.0/architecture/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 51423000bf8..d2753cd1940 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ BML 引擎物料管理功能剖析 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html index 051a783a80a..df816398190 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html index e383c5249cc..7f42c48bf9a 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html index ccd1e2c5fe0..a178bd38407 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html index 726de99a182..2509c49b79a 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 0f9fa79e385..57cb3fed3aa 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 5077344cbbe..bb57331912c 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index cdd3ee3093a..5df569f04a0 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html index a78db2bca74..d85bfc0923c 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html index 3444874cfd0..1d56d7fa87b 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html index 24f701d6253..def027bd9b5 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ DataSource Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html index f8d0d5ba4a9..2f1d8aa306b 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ MetaData Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/overview/index.html index e05d1a8ea76..bf0d1f21438 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html index 7d942adcca0..79da78aec9a 100644 --- a/zh-CN/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.1.0/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/cluster_deployment/index.html b/zh-CN/docs/1.1.0/deployment/cluster_deployment/index.html index 589ea01d9a0..31b1bed1c56 100644 --- a/zh-CN/docs/1.1.0/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.1.0/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html index d04ad4bdd63..112ba623ecd 100644 --- a/zh-CN/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.1.0/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 安装 EngineConnPlugin 引擎 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.1.0/deployment/installation_hierarchical_structure/index.html index bb404927560..2b373e8daa5 100644 --- a/zh-CN/docs/1.1.0/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.0/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html b/zh-CN/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html index 3b5f9eea01d..e64e45c24ed 100644 --- a/zh-CN/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html +++ b/zh-CN/docs/1.1.0/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ 开启 SkyWalking | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/quick_deploy/index.html b/zh-CN/docs/1.1.0/deployment/quick_deploy/index.html index 8e485285314..5fc26c9a11b 100644 --- a/zh-CN/docs/1.1.0/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.1.0/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html index 0318fc6f049..8e4d9a825d0 100644 --- a/zh-CN/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.0/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/start_metadatasource/index.html b/zh-CN/docs/1.1.0/deployment/start_metadatasource/index.html index 293b4439a10..ef9c769785a 100644 --- a/zh-CN/docs/1.1.0/deployment/start_metadatasource/index.html +++ b/zh-CN/docs/1.1.0/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ 数据源功能使用 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/deployment/web_install/index.html b/zh-CN/docs/1.1.0/deployment/web_install/index.html index 08416259526..951216993d6 100644 --- a/zh-CN/docs/1.1.0/deployment/web_install/index.html +++ b/zh-CN/docs/1.1.0/deployment/web_install/index.html @@ -7,7 +7,7 @@ 前端管理台部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.1.0/development/linkis_compile_and_package/index.html index 27efd006623..86ad142b22b 100644 --- a/zh-CN/docs/1.1.0/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.1.0/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 编译打包 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/development/linkis_config/index.html b/zh-CN/docs/1.1.0/development/linkis_config/index.html index 0599ad4bdfc..3a62da9d4a1 100644 --- a/zh-CN/docs/1.1.0/development/linkis_config/index.html +++ b/zh-CN/docs/1.1.0/development/linkis_config/index.html @@ -7,7 +7,7 @@ Linkis 配置参数介绍 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/development/linkis_debug/index.html b/zh-CN/docs/1.1.0/development/linkis_debug/index.html index a194a3a1916..576a50ec5e3 100644 --- a/zh-CN/docs/1.1.0/development/linkis_debug/index.html +++ b/zh-CN/docs/1.1.0/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 调试指引 | Apache Linkis - + @@ -49,7 +49,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/development/linkis_debug_in_mac/index.html b/zh-CN/docs/1.1.0/development/linkis_debug_in_mac/index.html index 9717a464658..9a6cc8418c1 100644 --- a/zh-CN/docs/1.1.0/development/linkis_debug_in_mac/index.html +++ b/zh-CN/docs/1.1.0/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ 在Mac上调试Linkis | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/development/new_engine_conn/index.html b/zh-CN/docs/1.1.0/development/new_engine_conn/index.html index 9e262c3fcec..f4f022ee47e 100644 --- a/zh-CN/docs/1.1.0/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.1.0/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -56,7 +56,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/development/web_build/index.html b/zh-CN/docs/1.1.0/development/web_build/index.html index 6e7cce5d37e..d721a046926 100644 --- a/zh-CN/docs/1.1.0/development/web_build/index.html +++ b/zh-CN/docs/1.1.0/development/web_build/index.html @@ -7,7 +7,7 @@ 前端管理台编译 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/flink/index.html b/zh-CN/docs/1.1.0/engine_usage/flink/index.html index cf8454021cd..5995210f6c4 100644 --- a/zh-CN/docs/1.1.0/engine_usage/flink/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/hive/index.html b/zh-CN/docs/1.1.0/engine_usage/hive/index.html index c56e252913f..bc18f5d5b1e 100644 --- a/zh-CN/docs/1.1.0/engine_usage/hive/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/jdbc/index.html b/zh-CN/docs/1.1.0/engine_usage/jdbc/index.html index 804f53538e3..4b717d06c75 100644 --- a/zh-CN/docs/1.1.0/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/overview/index.html b/zh-CN/docs/1.1.0/engine_usage/overview/index.html index ee026122094..e0709cddd08 100644 --- a/zh-CN/docs/1.1.0/engine_usage/overview/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/python/index.html b/zh-CN/docs/1.1.0/engine_usage/python/index.html index 5fc1154a88b..4c63f78bc1a 100644 --- a/zh-CN/docs/1.1.0/engine_usage/python/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/python/index.html @@ -7,21 +7,21 @@ Python 引擎 | Apache Linkis - +
    -
    Version: 1.1.0

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.0.

1. Environment configuration before using the Spark engine

If you want to use the Python engine on your server, you need to make sure that the Python executable directory is on the user's PATH and that the user has execute permission.

Environment variable name | Content | Remark
python | the Python execution environment | Anaconda's Python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Python version selection and compilation

Python supports both python2 and +

    Version: 1.1.0

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.0.

1. Environment configuration before using the Python engine

If you want to use the Python engine on your server, you need to make sure that the Python executable directory is on the user's PATH and that the user has execute permission.

Environment variable name | Content | Remark
python | the Python execution environment | Anaconda's Python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Python version selection and compilation

The Python engine supports both python2 and python3. You can switch the Python version with a simple configuration change; there is no need to recompile the Python engine.

2.2 Deploying and loading the python engineConn

The default loading method can be used here; no changes are needed.

2.3 Labels of the Python engine

The default dml.sql can be used for the insertion here; no changes are needed.

3. Using the Python engine

Preparations

Before submitting Python code on Linkis, you only need to make sure that python is on your user's $PATH.

3.1 Using the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, see the JAVA SDK Manual. For Python tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType

3.2 Submitting tasks through Linkis-cli

Since Linkis 1.0, tasks can also be submitted via the cli: you only need to specify the corresponding EngineConn and CodeType label types. For Python:

    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")"  -submitUser hadoop -proxyUser hadoop

For details, see the Linkis CLI Manual.

3.3 Using Scriptis

Scriptis is the simplest way to use the engine: go straight into Scriptis, right-click a directory, create a new python script, write your Python code, and click execute.

Python execution works via Py4j: a python gateway is started, and the Python engine then submits the code to the Python executor for execution.

Figure 3-1 Screenshot of Python execution

4. User settings of the Python engine

Beyond the engine configuration above, users can also customize settings, such as the Python version and the modules Python should load.

Figure 4-1 Console for user-defined Python configuration

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/shell/index.html b/zh-CN/docs/1.1.0/engine_usage/shell/index.html index c06174cffc2..4937b7824b5 100644 --- a/zh-CN/docs/1.1.0/engine_usage/shell/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.0/engine_usage/spark/index.html b/zh-CN/docs/1.1.0/engine_usage/spark/index.html index e258c847621..491e57e7327 100644 --- a/zh-CN/docs/1.1.0/engine_usage/spark/index.html +++ b/zh-CN/docs/1.1.0/engine_usage/spark/index.html @@ -7,15 +7,15 @@ Spark 引擎 | Apache Linkis - +
    Version: 1.1.0

Spark Engine

This article mainly introduces the configuration, deployment, and usage of the Spark engine in Linkis 1.0.

1. Environment configuration before using the Spark engine

If you want to use the Spark engine on your server, make sure the following environment variables are set correctly and are available to the user who starts the engine.

It is strongly recommended to check these environment variables for the executing user before running any Spark task.

Environment variable name | Content | Remark
JAVA_HOME | JDK installation path | required
HADOOP_HOME | Hadoop installation path | required
HADOOP_CONF_DIR | Hadoop configuration path | required
HIVE_CONF_DIR | Hive configuration path | required
SPARK_HOME | Spark installation path | required
SPARK_CONF_DIR | Spark configuration path | required
python | python | Anaconda's Python is recommended as the default python

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Spark engine

2.1 Spark version selection and compilation

In theory, Linkis 1.0 supports every Spark version above 2.x; the default supported version is Spark 2.4.3. To use your own Spark version, such as 2.1.0, you only need to change the plugin's Spark version and recompile: locate the linkis-engineplugin-spark module, change the <spark.version> tag to 2.1.0, and compile this module separately.

2.2 Deploying and loading the spark engineConn

Once your Spark engine plugin has been compiled, you need to place the new plugin in the designated location before it can be loaded; for details, see:

EngineConnPlugin installation

2.3 Labels of the Spark engine

Linkis 1.0 routes requests through labels, so the label data needs to be inserted into our database; the insertion method is described in:

EngineConnPlugin installation > 2.2 Console Configuration changes (optional)

3. Using the Spark engine

Preparation: queue setup

Because Spark execution requires queue resources, users must set a queue they are allowed to use before executing tasks.

Figure 3-1 Queue setup. You can also set the queue by adding it to the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")

3.1 Using the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, see the JAVA SDK Manual. For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType: py, sql, scala

3.2 Submitting tasks through Linkis-cli

Since Linkis 1.0, tasks can also be submitted via the cli: you only need to specify the corresponding EngineConn and CodeType label types. For Spark:

    You can also add the queue value in the StartUpMap of the submission parameter: `startupMap.put("wds.linkis.rm.yarnqueue", "dws")`
    -

For details, see the Linkis CLI Manual.

3.3 Using Scriptis

Scriptis is the simplest way to use the engine: go straight into Scriptis and create a new sql, scala, or pyspark script to execute.

The sql mode is the simplest: create a sql script, write your queries, and execute; progress is displayed during execution. If the user does not have a Spark engine yet, executing sql starts a Spark session (which may take some time), +For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType: py, sql, scala

3.2 Submitting tasks through Linkis-cli

Since Linkis 1.0, tasks can also be submitted via the cli: you only need to specify the corresponding EngineConn and CodeType label types. For Spark:

    ## codeType对应关系 py-->pyspark  sql-->sparkSQL scala-->Spark scalash ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables"  -submitUser hadoop -proxyUser hadoop
    +# 可以在提交参数通过-confMap wds.linkis.yarnqueue=dws  来指定yarn 队列sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql  -confMap wds.linkis.yarnqueue=dws -code "show tables"  -submitUser hadoop -proxyUser hadoop

    具体使用可以参考: Linkis CLI Manual.

    3.3 Scriptis的使用方式#

    Scriptis的使用方式是最简单的,您可以直接进入Scriptis,新建sql、scala或者pyspark脚本进行执行。

    sql的方式是最简单的,您可以新建sql脚本然后编写进行执行,执行的时候,会有进度的显示。如果一开始用户是没有spark引擎的话,sql的执行会启动一个spark会话(这里可能会花一些时间), SparkSession初始化之后,就可以开始执行sql。

Figure 3-2 Screenshot of sparksql execution results

For spark-scala tasks, variables such as sqlContext are already initialized, and users can use this sqlContext directly to execute sql.

Figure 3-3 Screenshot of spark-scala execution results

Similarly, in the pyspark mode, the SparkSession is already initialized, and users can execute sql directly via spark.sql.

Figure 3-4 pyspark execution

4. User Settings for the Spark Engine#

In addition to the engine configuration above, users can also customize their own settings, such as the number of executors for the Spark session and the executor memory. These parameters let users configure Spark more freely; other Spark parameters can also be modified, such as the python version for pyspark.

Figure 4-1 Console for user-defined Spark configuration

    Version: 1.1.1

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.X.

1. Environment Configuration Before Using the Python Engine#

If you want to use the python engine on your server, you need to make sure the python executable directory is on the user's PATH and that the user has execute permission.

| Environment variable | Content | Remarks |
| --- | --- | --- |
| python | python execution environment | Anaconda's python executor is recommended |

Table 1-1 Environment configuration checklist

2. Configuration and Deployment of the Python Engine#

2.1 Choosing and compiling a Python version#

Python supports both python2 and python3; you can switch the Python version with a simple configuration change, without recompiling the python engine.

2.2 Deploying and loading the python engineConn#

The default loading method can be used here as-is.

3. Using the Python Engine#

Preparation#

Before submitting python tasks on Linkis, you only need to make sure python is on your user's $PATH.

3.1 Using the Linkis SDK#

Linkis provides Java and Scala SDKs for submitting tasks to the Linkis server. For details, refer to the JAVA SDK Manual. For Python tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType

3.2 Task Submission via Linkis-cli#

Since Linkis 1.0, tasks can also be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Python usage is as follows:

    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop

For details, refer to: Linkis CLI Manual.

3.3 Using Scriptis#

Using Scriptis is the simplest way: go directly into Scriptis, right-click a directory, create a new python script, write your python code, and click execute.

Python execution works via Py4j: a python gateway is started, and then the Python engine submits the code to the python executor for execution.
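To make the Py4j mechanism concrete, the sketch below shows the bare JVM side of such a bridge: a GatewayServer exposes an entry point that a python process can connect to and call. This is only an illustration of the py4j library's mechanism; the EntryPoint class is made up here and is not the Linkis engine's actual implementation.

    import py4j.GatewayServer;

    public class EntryPoint {
        // A method the python side can call back into once connected.
        public String execute(String code) {
            return "received: " + code;
        }

        public static void main(String[] args) {
            // Start the JVM-side gateway; a python process then connects
            // (by default on port 25333) and exchanges calls with the JVM.
            GatewayServer server = new GatewayServer(new EntryPoint());
            server.start();
        }
    }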

Figure 3-1 Screenshot of Python execution results

4. User Settings for the Python Engine#

In addition to the engine configuration above, users can also customize their own settings, such as the Python version and the modules that Python needs to load.

Figure 4-1 Console for user-defined Python configuration

    Version: 1.1.1

Spark Engine

This article mainly introduces the configuration, deployment, and usage of the Spark engine in Linkis 1.X.

1. Environment Configuration Before Using the Spark Engine#

If you want to use the Spark engine on your server, you need to make sure the following environment variables are set correctly and that the user who starts the engine has these environment variables.

It is strongly recommended that you check these environment variables for the executing user before running a Spark task.

| Environment variable | Content | Remarks |
| --- | --- | --- |
| JAVA_HOME | JDK installation path | Required |
| HADOOP_HOME | Hadoop installation path | Required |
| HADOOP_CONF_DIR | Hadoop configuration path | Required |
| HIVE_CONF_DIR | Hive configuration path | Required |
| SPARK_HOME | Spark installation path | Required |
| SPARK_CONF_DIR | Spark configuration path | Required |
| python | python | Anaconda's python is recommended as the default python |

Table 1-1 Environment configuration checklist

2. Configuration and Deployment of the Spark Engine#

2.1 Choosing and compiling a Spark version#

Note: a full compilation of the linkis project is required before compiling the spark engine. In theory, Linkis 1.X supports all Spark versions 2.x and above. The default supported version is Spark 2.4.3. If you want to use another Spark version, such as Spark 2.1.0, you only need to change the Spark version of the plugin and recompile it. Specifically, locate the linkis-engineplugin-spark module, change the value of the "spark.version" tag in the maven dependency to 2.1.0, and compile this module separately.

2.2 Deploying and loading the Spark engineConn#

If you have finished compiling your Spark engine plugin, you need to place the new plugin in the specified location before it can be loaded. For details, refer to the following article:

EngineConnPlugin Engine Plugin Installation

2.3 Labels of the Spark engine#

Linkis 1.X distinguishes engine versions through label configuration, so we need to insert label data into the database, as shown below:

EngineConnPlugin Engine Plugin Installation > 2.2 Console Configuration Modification (optional)

3. Using the Spark Engine#

Preparation: queue settings#

Because Spark execution requires queue resources, users must set a queue they are allowed to use before executing tasks.

Figure 3-1 Queue settings. You can also add the queue value in the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")
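As a minimal sketch of that submission-side alternative (plain Java; wds.linkis.rm.yarnqueue is the key shown above, everything else is illustrative):

    Map<String, Object> startupMap = new HashMap<String, Object>();
    // Per-job override of the yarn queue; without it, the queue configured
    // in the console for this user (Figure 3-1) is used.
    startupMap.put("wds.linkis.rm.yarnqueue", "dws");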

3.1 Using the Linkis SDK#

Linkis provides Java and Scala SDKs for submitting tasks to the Linkis server. For details, refer to the JAVA SDK Manual. For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the demo:

    Map<String, Object> labels = new HashMap<String, Object>();
    labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType label
    labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
    labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType: py, sql, scala

3.2 Task Submission via Linkis-cli#

Since Linkis 1.0, tasks can also be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Spark usage is as follows:

    # codeType mapping: py --> pyspark, sql --> sparkSQL, scala --> Spark scala
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

    # The yarn queue can be specified in the submission parameters via -confMap wds.linkis.yarnqueue=dws
    sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop

You can also add the queue value in the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")

For details, refer to: Linkis CLI Manual.

3.3 Using Scriptis#

Using Scriptis is the simplest way: you can go directly into Scriptis and create a new sql, scala, or pyspark script for execution.

The sql approach is the simplest: create a new sql script, write your queries, and execute. Progress is displayed during execution. If the user has no Spark engine at first, the sql execution will start a Spark session (this may take some time); once the SparkSession is initialized, the sql starts executing.

Figure 3-2 Screenshot of sparksql execution results

For spark-scala tasks, variables such as sqlContext are already initialized, and users can use this sqlContext directly to execute sql.

Figure 3-3 Screenshot of spark-scala execution results

Similarly, in the pyspark mode, the SparkSession is already initialized, and users can execute sql directly via spark.sql.

Figure 3-4 pyspark execution

4. User Settings for the Spark Engine#

In addition to the engine configuration above, users can also customize their own settings, such as the number of executors for the Spark session and the executor memory. These parameters let users configure Spark more freely; other Spark parameters can also be modified, such as the python version for pyspark. A sketch of such parameters follows below.

Figure 4-1 Console for user-defined Spark configuration
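As an illustration of the kind of parameters meant here, the sketch below builds startup parameters for the executor count and memory. The first two keys are standard Spark configuration names and are assumed to be accepted as-is; spark.python.version is a hypothetical name for the pyspark python setting. Verify all of them against your console's parameter list.

    Map<String, Object> startupMap = new HashMap<String, Object>();
    // Standard Spark conf names; assumed to be the keys the console manages.
    startupMap.put("spark.executor.instances", 4);     // number of executors for the session
    startupMap.put("spark.executor.memory", "4g");     // memory per executor
    // Hypothetical key for the pyspark python version.
    startupMap.put("spark.python.version", "python3");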

    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html b/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html index c82a60be6aa..924e7a57d0e 100644 --- a/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html +++ b/zh-CN/docs/1.1.1/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK 的使用 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html index 5204d692f0a..06ba5d4696c 100644 --- a/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.1.1/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/overview/index.html b/zh-CN/docs/1.1.1/user_guide/overview/index.html index b949f8ae5db..d3aaac50c90 100644 --- a/zh-CN/docs/1.1.1/user_guide/overview/index.html +++ b/zh-CN/docs/1.1.1/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html b/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html index 2e774b6dc4d..376c8b7359e 100644 --- a/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.1.1/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.1/user_guide/udf/index.html b/zh-CN/docs/1.1.1/user_guide/udf/index.html index 4d674209556..0fc9d8333c5 100644 --- a/zh-CN/docs/1.1.1/user_guide/udf/index.html +++ b/zh-CN/docs/1.1.1/user_guide/udf/index.html @@ -7,7 +7,7 @@ UDF 的使用 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html index 72774d28166..b68137a03d8 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html @@ -7,7 +7,7 @@ 引擎插件API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html index 2b52a6109a3..1236cb5c41b 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ 引擎物料刷新API | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html index e625bc8ef25..8b216d8a84b 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-management-api/index.html @@ -7,7 +7,7 @@ 任务管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html index 09327176578..8f1cc2e6cec 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-entrance-api/task-operation-api/index.html @@ -7,7 +7,7 @@ 任务操作 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html index 41852bb75d7..6bc11a1698e 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html @@ -7,7 +7,7 @@ EC资源信息管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html index a2dc39f0af3..9b9dcb825c4 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html @@ -7,7 +7,7 @@ ECM资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html index 4616a881e1a..ba026625e7b 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html @@ -7,7 +7,7 @@ 引擎管理 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html index 97cc61c92af..8544e2aa4f6 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html @@ -7,7 +7,7 @@ 资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html index 6f6695245cb..0cc61a5030d 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-history-service-api/index.html @@ -7,7 +7,7 @@ 上下文历史记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html index 6b8336a7860..06751a6bf7d 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-listening-service-api/index.html @@ -7,7 +7,7 @@ 上下文监听服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html index f8422435ee8..a0ca9cec345 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-logging-service-api/index.html @@ -7,7 +7,7 @@ 上下文记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html index 34b8736449a..79cfcf3bc89 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-cs-api/context-service-api/index.html @@ -7,7 +7,7 @@ 上下文API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html index 93ed2c5d035..4d1d5cf59b8 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html @@ -7,7 +7,7 @@ BM项目操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html index bd329c4015f..4151b95d1a3 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html @@ -7,7 +7,7 @@ BML资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html index 5a05b7bb40c..9f11a6c0030 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html @@ -7,7 +7,7 @@ BMLFS管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html index 951168675c0..6cd1191a7d6 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/currency-api/index.html @@ -7,7 +7,7 @@ 通用API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html index 81aeb7228c2..07b54c3eb48 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html @@ -7,7 +7,7 @@ 数据源API | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html index 0622bde1203..2384d3274ba 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/file-system-api/index.html @@ -7,7 +7,7 @@ 文件系统 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html index a246c272f8b..41e44517107 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/global-variable-api/index.html @@ -7,7 +7,7 @@ 添加全局变量 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html index c9fbc2d6d61..61e7f6803da 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html @@ -7,7 +7,7 @@ 管理台首页API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html index 606aec73c59..7cc28c8df5b 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/instance-management-api/index.html @@ -7,7 +7,7 @@ 实例管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html index fe8e7748685..f0b7c5fdfb3 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html @@ -7,7 +7,7 @@ 历史作业API | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html index 54af4762eb5..e9052a61e8f 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/link-error-code/index.html @@ -7,7 +7,7 @@ Linkis错误代码 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html index bc3e8fe859c..25f12e9d857 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html @@ -7,7 +7,7 @@ Mdq表API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html index 4dba83dbbe1..87b4e896e68 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html @@ -7,7 +7,7 @@ 元数据查询API | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html index 655d3b695c1..f060381ac21 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html @@ -7,7 +7,7 @@ 参数配置 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html index ba8bdbd6638..c3de7cf04a8 100644 --- a/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html +++ b/zh-CN/docs/1.1.3/api/http/linkis-ps-publicservice-api/udf-api/index.html @@ -7,7 +7,7 @@ UDF操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/jdbc_api/index.html b/zh-CN/docs/1.1.3/api/jdbc_api/index.html index 0fe2c98e5e8..429025098bb 100644 --- a/zh-CN/docs/1.1.3/api/jdbc_api/index.html +++ b/zh-CN/docs/1.1.3/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html b/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html index cf55b04c20b..458361158bc 100644 --- a/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html +++ b/zh-CN/docs/1.1.3/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/login_api/index.html b/zh-CN/docs/1.1.3/api/login_api/index.html index 5f8065ff69d..2923ec74253 100644 --- a/zh-CN/docs/1.1.3/api/login_api/index.html +++ b/zh-CN/docs/1.1.3/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/api/overview/index.html b/zh-CN/docs/1.1.3/api/overview/index.html index 5082b781e0d..08dcbbcb4be 100644 --- a/zh-CN/docs/1.1.3/api/overview/index.html +++ b/zh-CN/docs/1.1.3/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html b/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html index f02b2940d0d..09739ab44cc 100644 --- a/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html +++ b/zh-CN/docs/1.1.3/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -35,7 +35,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/commons/variable/index.html b/zh-CN/docs/1.1.3/architecture/commons/variable/index.html index 283d7993e0d..cb0adf2423c 100644 --- a/zh-CN/docs/1.1.3/architecture/commons/variable/index.html +++ b/zh-CN/docs/1.1.3/architecture/commons/variable/index.html @@ -7,7 +7,7 @@ 自定义变量设计 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index b937dd63bb5..62afeb34f22 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 启动流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html index b03e06e0ace..69ef973d176 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html index ae3b1f6b912..9cb15b0c1a8 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_history/index.html @@ -7,7 +7,7 @@ EngineConn 历史信息记录特性 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 54f2372af3b..9d05ec155fa 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html index 7bc4d4d3675..8eafa6dd355 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_metrics/index.html @@ -7,7 +7,7 @@ EngineConn Metrics 上报特性 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 6c627010257..d5538726dff 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html index 7d7c66642a4..93f9991f104 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index a9278683c18..00478f8725a 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Linkis任务执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html index 57f816761b5..784838dd922 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 0b839443610..54b0c088a3e 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html index 8d32e4cde50..e7a47005b65 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html index 6e9763a3542..a712f37fd2f 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 7979e666878..677652ee48c 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html index c8e47e76aed..d91767b604d 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html b/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html index fb40651b544..a22e5ccf45b 100644 --- a/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html +++ b/zh-CN/docs/1.1.3/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Linkis支持代理用户提交架构涉及 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html index 254a5ca8ad1..d40ac498b09 100644 --- a/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/1.1.3/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html index 9d14ebfece6..b47edca8e5b 100644 --- a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html index 09445f0c72c..5b6a8052643 100644 --- a/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/overview/index.html b/zh-CN/docs/1.1.3/architecture/overview/index.html index fce77a8757f..5f6ccce1dab 100644 --- a/zh-CN/docs/1.1.3/architecture/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 826157e29df..f73b4565f9b 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ BML 引擎物料管理功能剖析 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html index 36b9846f8e8..0bb628dc750 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html index 97774e26739..7463f28ecb9 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/content_service_cleanup/index.html @@ -7,7 +7,7 @@ CS 清理接口特性 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html index 0cd4b203631..7f16dfd25fa 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 06bab08bee9..89638d88a3c 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html index c4b39bad013..1045dbce7fc 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 16b70a1ff7b..e6a45799ac1 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html index a3ec005e8b4..60e6fb34c77 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index df45e4e98fb..322d2473378 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html index 81c97d70ef7..a1c372c54c0 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html index 336dbf09380..af20cf2471c 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html index f6356947c8b..9ce15688478 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ DataSource Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html index c250f730f88..1d920a85c81 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ MetaData Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html index 2beeed60c7b..cb212c6ce94 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html index a404ba5243a..ab37fea9bfe 100644 --- a/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/1.1.3/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html b/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html index 7d5d067cb67..43b7f43cad8 100644 --- a/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/1.1.3/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html b/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html index c141a78862c..b4fec58e688 100644 --- a/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html +++ b/zh-CN/docs/1.1.3/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Linkis 去 HDFS 部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html index 3e44d13b43f..695d8c57e3e 100644 --- a/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/1.1.3/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 引擎的安装 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html index cfbd8ec18b7..eac727378c7 100644 --- a/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.3/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 部署后的目录结构 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html b/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html index b639a813556..ad9a0af2424 100644 --- a/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html +++ b/zh-CN/docs/1.1.3/deployment/involve_knife4j_into_linkis/index.html @@ -7,7 +7,7 @@ 启用 Knife4j | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html b/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html index 95faab31c14..df96662842e 100644 --- a/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html +++ b/zh-CN/docs/1.1.3/deployment/involve_prometheus_into_linkis/index.html @@ -7,7 +7,7 @@ 开启Prometheus监控 | Apache Linkis - + @@ -30,7 +30,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html b/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html index 8a833ca02a3..e42ac880df5 100644 --- a/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html +++ b/zh-CN/docs/1.1.3/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ 开启 SkyWalking | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html b/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html index dc6fa3702ab..2ca77639bac 100644 --- a/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html +++ b/zh-CN/docs/1.1.3/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ 工具 scriptis 的安装部署 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html b/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html index 1b188773e7e..8f7c3863ae9 100644 --- a/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html +++ b/zh-CN/docs/1.1.3/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速单机部署 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html index 1c1882ae089..fd03392d1aa 100644 --- a/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.3/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html b/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html index 3c46dc40cf6..6abf5d7b9e0 100644 --- a/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html +++ b/zh-CN/docs/1.1.3/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ 数据源功能使用 | Apache Linkis - + @@ -75,7 +75,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html b/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html index a86600e97ab..f6776b4e3cb 100644 --- a/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html +++ b/zh-CN/docs/1.1.3/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/deployment/web_install/index.html b/zh-CN/docs/1.1.3/deployment/web_install/index.html index b081aad8748..7bc972cf935 100644 --- a/zh-CN/docs/1.1.3/deployment/web_install/index.html +++ b/zh-CN/docs/1.1.3/deployment/web_install/index.html @@ -7,7 +7,7 @@ 管理台部署 | Apache Linkis - + @@ -28,7 +28,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html b/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html index d393dc0489c..392f551865b 100644 --- a/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 后端编译打包 | Apache Linkis - + @@ -21,7 +21,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_config/index.html b/zh-CN/docs/1.1.3/development/linkis_config/index.html index 27381f836e2..02f53c5a293 100644 --- a/zh-CN/docs/1.1.3/development/linkis_config/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_config/index.html @@ -7,7 +7,7 @@ Linkis 配置参数介绍 | Apache Linkis - + @@ -31,7 +31,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_debug/index.html b/zh-CN/docs/1.1.3/development/linkis_debug/index.html index 9f610f4a3cc..4415cb453af 100644 --- a/zh-CN/docs/1.1.3/development/linkis_debug/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 服务调试指引 | Apache Linkis - + @@ -52,7 +52,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html b/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html index 486f631f7aa..bcf880fdd83 100644 --- a/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html +++ b/zh-CN/docs/1.1.3/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ 在Mac上调试Linkis | Apache Linkis - + @@ -54,7 +54,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/new_engine_conn/index.html b/zh-CN/docs/1.1.3/development/new_engine_conn/index.html index 5c6c83fad78..2221944a78d 100644 --- a/zh-CN/docs/1.1.3/development/new_engine_conn/index.html +++ b/zh-CN/docs/1.1.3/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -54,7 +54,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/swwager_instructions/index.html b/zh-CN/docs/1.1.3/development/swwager_instructions/index.html index f97fd5757cc..a507a63a191 100644 --- a/zh-CN/docs/1.1.3/development/swwager_instructions/index.html +++ b/zh-CN/docs/1.1.3/development/swwager_instructions/index.html @@ -7,7 +7,7 @@ Swwager 注解使用说明 | Apache Linkis - + @@ -24,7 +24,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/development/web_build/index.html b/zh-CN/docs/1.1.3/development/web_build/index.html index 592e82fcba1..8bb67828e2b 100644 --- a/zh-CN/docs/1.1.3/development/web_build/index.html +++ b/zh-CN/docs/1.1.3/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis 管理台编译 | Apache Linkis - + @@ -21,7 +21,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/flink/index.html b/zh-CN/docs/1.1.3/engine_usage/flink/index.html index 41365e9d28b..e2a630cf3ea 100644 --- a/zh-CN/docs/1.1.3/engine_usage/flink/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/hive/index.html b/zh-CN/docs/1.1.3/engine_usage/hive/index.html index 8e957b4930c..17b95622f4d 100644 --- a/zh-CN/docs/1.1.3/engine_usage/hive/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html b/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html index 6df8922ed41..853cd6ade6a 100644 --- a/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html b/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html index 075a195f07e..6145c785076 100644 --- a/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ openLooKeng 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/overview/index.html b/zh-CN/docs/1.1.3/engine_usage/overview/index.html index ae444cf8902..40c20894d8c 100644 --- a/zh-CN/docs/1.1.3/engine_usage/overview/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html b/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html index a872167f579..332b8ea1df5 100644 --- a/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ Pipeline 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/python/index.html b/zh-CN/docs/1.1.3/engine_usage/python/index.html index 4cd4b80dae2..331e0c5f13d 100644 --- a/zh-CN/docs/1.1.3/engine_usage/python/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/python/index.html @@ -7,21 +7,24 @@ Python 引擎 | Apache Linkis - +
    -
Version: Next(1.1.3)

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.X.

1. Environment configuration before using the Spark engine#

If you want to use the python engine on your server, you need to ensure that the python executable directory is in the user's PATH and that the user has execution permission.

Environment variable name | Environment variable content | Remarks
python | python execution environment | the anaconda python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine#

2.1 Python version selection and compilation#

Python supports both python2 and python3. You can switch the Python version with a simple configuration change, without recompiling the python engine.

2.2 python engineConn deployment and loading#

The default loading method can be used here; it works without further changes.

3. Using the Python engine#

Preparation#

Before submitting python on linkis, you only need to make sure that the python path is in your user's $PATH.

3.1 Using via the Linkis SDK#

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. +

Version: Next(1.1.3)

Python Engine

This article mainly introduces the configuration, deployment, and usage of the Python engine in Linkis 1.X.

1. Environment configuration before using the Python engine#

If you want to use the python engine on your server, you need to ensure that the python executable directory is in the user's PATH and that the user has execution permission.

Environment variable name | Environment variable content | Remarks
python | python execution environment | the anaconda python executor is recommended

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine#

2.1 Python version selection and compilation#

Python supports both python2 and python3. You can switch the Python version with a simple configuration change, without recompiling the python engine. The specific configuration is as follows.

#1: Submit the task via the cli and switch versions by setting python.version=python3 at the end of the command (python3: the name of the file generated when the soft link is created; it can be named freely)
sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop -confMap python.version=python3

#2: Submit the task via the cli and switch versions by adding the version path to the command: python.version=/usr/bin/python (/usr/bin/python: the path of the file generated when the soft link is created)
sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop -confMap python.version=/usr/bin/python

Page configuration:

2.2 python engineConn deployment and loading#

The default loading method can be used here; it works without further changes.

3. Using the Python engine#

Preparation#

Before submitting python on linkis, you only need to make sure that the python path is in your user's $PATH.

3.1 Using via the Linkis SDK#

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. For Python tasks you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType

3.2 Submitting tasks via Linkis-cli#

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Python usage is as follows:

sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop

For detailed usage, refer to: Linkis CLI Manual.

3.3 Using Scriptis#

Using Scriptis is the simplest way: enter Scriptis directly, right-click a directory, create a new python script, write python code, and click execute.

Python execution works via Py4j: a python gateway is started, and the Python engine then submits the code to the python executor for execution.

Figure 3-1 Screenshot of python execution
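To make the Py4j mechanism above concrete, here is a minimal, self-contained sketch of a Py4j gateway on the JVM side. It is illustrative only and is not the Python engine's actual code; the EntryPoint class and its ping method are invented for the example:

    import py4j.GatewayServer;

    // Minimal Py4j gateway sketch: the JVM side starts a GatewayServer and
    // exposes an entry point object; a separate python process connects to
    // the gateway (default port 25333) and calls methods on that object.
    // The Python engine bridges Java and python with this same pattern.
    public class EntryPoint {

        // A trivial method that the python side could invoke remotely.
        public String ping() {
            return "pong";
        }

        public static void main(String[] args) {
            GatewayServer server = new GatewayServer(new EntryPoint());
            server.start(); // listens for python connections on port 25333
            System.out.println("Py4j gateway started");
        }
    }

On the python side, py4j's JavaGateway would connect to this server, and gateway.entry_point.ping() would invoke the method above.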

4. User settings of the Python engine#

In addition to the above engine configuration, users can make custom settings, such as the python version and the modules that python needs to load.

Figure 4-1 Python user-defined configuration console

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/shell/index.html b/zh-CN/docs/1.1.3/engine_usage/shell/index.html index ab80c40794c..a34818e774f 100644 --- a/zh-CN/docs/1.1.3/engine_usage/shell/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/spark/index.html b/zh-CN/docs/1.1.3/engine_usage/spark/index.html index e625bf97465..6f3bcb15a34 100644 --- a/zh-CN/docs/1.1.3/engine_usage/spark/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark 引擎 | Apache Linkis - + @@ -15,7 +15,8 @@
    Version: Next(1.1.3)

Spark Engine

This article mainly introduces the configuration, deployment, and usage of the spark engine in Linkis 1.X.

1. Environment configuration before using the Spark engine#

If you want to use the spark engine on your server, make sure the following environment variables are set correctly and that the user who starts the engine has these environment variables.

It is strongly recommended that you check these environment variables for the executing user before running a spark task.

Environment variable name | Environment variable content | Remarks
JAVA_HOME | JDK installation path | required
HADOOP_HOME | Hadoop installation path | required
HADOOP_CONF_DIR | Hadoop configuration path | required
HIVE_CONF_DIR | Hive configuration path | required
SPARK_HOME | Spark installation path | required
SPARK_CONF_DIR | Spark configuration path | required
python | python | anaconda's python is recommended as the default python

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Spark engine#

2.1 spark version selection and compilation#

Note: a full compilation of the linkis project is required before compiling the spark engine. In theory Linkis 1.X supports all spark versions 2.x and above, with Spark 2.4.3 supported by default. To use another spark version, such as spark 2.1.0, you only need to change the spark version of the plugin and recompile it: find the linkis-engineplugin-spark module, change the value of the "spark.version" tag in the maven dependency to 2.1.0, and compile this module separately.

2.2 spark engineConn deployment and loading#

If you have finished compiling your spark engine plugin, you need to place the new plugin in the specified location before it can be loaded; for details, refer to the following article:

EngineConnPlugin Engine Plugin Installation

2.3 Labels of the spark engine#

Linkis 1.X distinguishes engine versions through label configuration, so we need to insert data into the database; the insertion method is described in:

EngineConnPlugin Engine Plugin Installation > 2.2 Console Configuration modification (optional)

3. Using the spark engine#

Preparation: queue setting#

Because spark execution requires queue resources, users must set a queue they are allowed to use before executing.

Figure 3-1 Queue setting

You can also add the queue value in the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")
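As a minimal sketch of the same idea, reusing the HashMap-based style of the SDK snippets in this document (the surrounding class and the printout are invented for illustration):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch only: builds the StartUpMap with the yarn queue parameter
    // described above; it would be passed along with the task submission.
    public class StartupMapExample {
        public static void main(String[] args) {
            Map<String, Object> startupMap = new HashMap<String, Object>();
            startupMap.put("wds.linkis.rm.yarnqueue", "dws"); // target yarn queue
            System.out.println(startupMap); // {wds.linkis.rm.yarnqueue=dws}
        }
    }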

3.1 Using via the Linkis SDK#

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. -For Spark tasks you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala

3.2 Submitting tasks via Linkis-cli#

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Spark usage is as follows:

#You can also add the queue value in the StartUpMap of the submission parameter: startupMap.put("wds.linkis.rm.yarnqueue", "dws")

For detailed usage, refer to: Linkis CLI Manual.

3.3 Using Scriptis#

Using Scriptis is the simplest way: enter Scriptis directly and create sql, scala, or pyspark scripts to execute.

sql is the simplest way: create a sql script, write it, and execute it; progress is shown during execution. If the user has no spark engine at first, executing sql starts a spark session (this may take some time), +For Spark tasks you only need to modify the EngineConnType and CodeType parameters in the Demo:

        Map<String, Object> labels = new HashMap<String, Object>();
        labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType Label
        labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
        labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType py,sql,scala

3.2 Submitting tasks via Linkis-cli#

Since Linkis 1.0, tasks can be submitted via the cli. You only need to specify the corresponding EngineConn and CodeType label types. Spark usage is as follows:

# codeType mapping: py --> pyspark, sql --> sparkSQL, scala --> Spark scala
sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop
# the yarn queue can be specified in the submission parameters via -confMap wds.linkis.yarnqueue=dws
sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop

For detailed usage, refer to: Linkis CLI Manual.

3.3 Using Scriptis#

Using Scriptis is the simplest way: enter Scriptis directly and create sql, scala, or pyspark scripts to execute.

sql is the simplest way: create a sql script, write it, and execute it; progress is shown during execution. If the user has no spark engine at first, executing sql starts a spark session (this may take some time). Once the SparkSession is initialized, sql execution begins.

Figure 3-2 Screenshot of sparksql execution

For spark-scala tasks, variables such as sqlContext are already initialized, and users can directly use this sqlContext to execute sql.

Figure 3-3 spark-scala execution

Similarly, in pyspark, the SparkSession is already initialized, and users can directly execute sql via spark.sql.

Figure 3-4 pyspark execution
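For reference, the pre-initialized session behaves like an ordinarily constructed SparkSession. Below is a minimal Java equivalent of the spark.sql usage above, assuming a standalone program with the Spark SQL dependency on the classpath; inside Scriptis the session already exists and none of this setup is needed:

    import org.apache.spark.sql.SparkSession;

    public class SparkSqlExample {
        public static void main(String[] args) {
            // Inside Scriptis this SparkSession is created for you; building
            // one manually is only needed outside the engine.
            SparkSession spark = SparkSession.builder()
                    .appName("linkis-doc-example")
                    .master("local[*]")
                    .getOrCreate();
            spark.sql("show tables").show(); // same spark.sql usage as above
            spark.stop();
        }
    }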

4. User settings of the spark engine#

In addition to the above engine configuration, users can make custom settings, such as the number of executors in the spark session and the executor memory. These parameters let users set their own spark parameters more freely, and other spark parameters can also be modified, such as the python version used by pyspark.

Figure 4-1 Spark user-defined configuration console

    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html b/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html index c1b2312d7d2..0a4d870a1d9 100644 --- a/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html +++ b/zh-CN/docs/1.1.3/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop 引擎 | Apache Linkis - + @@ -32,7 +32,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/introduction/index.html b/zh-CN/docs/1.1.3/introduction/index.html index 75411b21a16..1061f87a193 100644 --- a/zh-CN/docs/1.1.3/introduction/index.html +++ b/zh-CN/docs/1.1.3/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -24,7 +24,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html b/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html index b3d828e6d23..a3bbd5dfb02 100644 --- a/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html +++ b/zh-CN/docs/1.1.3/release-notes-1.1.3/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.3-RC1 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/release/index.html b/zh-CN/docs/1.1.3/release/index.html index 0e9d044d086..1800a8e7971 100644 --- a/zh-CN/docs/1.1.3/release/index.html +++ b/zh-CN/docs/1.1.3/release/index.html @@ -7,7 +7,7 @@ 版本总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/table/udf-table/index.html b/zh-CN/docs/1.1.3/table/udf-table/index.html index 6924e3f7a47..597e8654985 100644 --- a/zh-CN/docs/1.1.3/table/udf-table/index.html +++ b/zh-CN/docs/1.1.3/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF 的表结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tags/feature/index.html b/zh-CN/docs/1.1.3/tags/feature/index.html index 78f6e3b0591..28b9db1c3a4 100644 --- a/zh-CN/docs/1.1.3/tags/feature/index.html +++ b/zh-CN/docs/1.1.3/tags/feature/index.html @@ -7,7 +7,7 @@ One doc tagged with "Feature" | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tags/index.html b/zh-CN/docs/1.1.3/tags/index.html index 67aeff3efee..fae3f939d89 100644 --- a/zh-CN/docs/1.1.3/tags/index.html +++ b/zh-CN/docs/1.1.3/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html index d2cb2a3cc8e..0787a5490cd 100644 --- a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html index 5a146e1f2ee..ea2a45dbe7f 100644 --- a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html index f6701037c27..3b9fccf3c48 100644 --- a/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/1.1.3/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index e4125589152..949b6691e96 100644 --- a/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/1.1.3/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 0.x到1.0的升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html b/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html index 197b86bf9f3..6872f87e3c0 100644 --- a/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html +++ b/zh-CN/docs/1.1.3/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ 1.0.3以上的版本升级 | Apache Linkis - + @@ -36,7 +36,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/console_manual/index.html b/zh-CN/docs/1.1.3/user_guide/console_manual/index.html index 26d2439015e..993d30b080e 100644 --- a/zh-CN/docs/1.1.3/user_guide/console_manual/index.html +++ b/zh-CN/docs/1.1.3/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -21,7 +21,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html b/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html index 884590fe65c..ca4f921cdf6 100644 --- a/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html +++ b/zh-CN/docs/1.1.3/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html b/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html index 02e41f19c40..6e84aa92ccd 100644 --- a/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html +++ b/zh-CN/docs/1.1.3/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK 的使用 | Apache Linkis - + @@ -34,7 +34,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html b/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html index 911184d5442..8747e25425c 100644 --- a/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/1.1.3/user_guide/linkiscli_manual/index.html @@ -7,19 +7,19 @@ Linkis-Cli 方式使用 | Apache Linkis - +
    -
    Version: Next(1.1.3)

Using Linkis-Cli

1. Introduction#

Linkis-Cli is a shell command-line program for submitting tasks to Linkis.

2. Basic example#

You can refer to the example below to submit a simple task to Linkis.

Step 1: check that the default configuration file linkis-cli.properties exists in the conf/ directory and contains the following configuration:

   # address of the linkis-mg-gateway service
   wds.linkis.client.common.gatewayUrl=http://127.0.0.1:9001
   # authentication strategy: token/static
   wds.linkis.client.common.authStrategy=token
   # in static mode: username/password; in token mode: token_name and legal_users from the linkis-mg-gateway_auth_token table
   wds.linkis.client.common.tokenKey=Validation-Code
   wds.linkis.client.common.tokenValue=BML-AUTH

Step 2: enter the linkis installation directory and run:

    ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from testdb.test;" -submitUser hadoop -proxyUser hadoop

Step 3: you will see on the console that the task has been submitted to linkis and has started executing.

linkis-cli currently only supports synchronous submission: after submitting a task to linkis, it keeps polling the task status and pulling task logs until the task finishes. If the task finishes successfully, linkis-cli also fetches the result set and outputs it.
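For intuition, that synchronous behavior amounts to roughly the sketch below. TaskClient and all of its method names are hypothetical stand-ins, not linkis-cli's real internals:

    import java.util.Arrays;
    import java.util.List;

    public class SyncSubmitSketch {

        // Hypothetical client abstraction; not the real linkis-cli API.
        interface TaskClient {
            String submit(String code);          // returns a taskId
            String status(String taskId);        // e.g. "RUNNING", "SUCCEED", "FAILED"
            String pullLogs(String taskId);      // incremental log output
            String fetchResult(String taskId);   // result set of a finished task
        }

        static final List<String> TERMINAL = Arrays.asList("SUCCEED", "FAILED", "CANCELLED");

        static void run(TaskClient client, String code) throws InterruptedException {
            String taskId = client.submit(code);
            String status;
            do {
                System.out.print(client.pullLogs(taskId)); // stream logs while waiting
                Thread.sleep(2000);                        // poll interval
                status = client.status(taskId);
            } while (!TERMINAL.contains(status));
            if ("SUCCEED".equals(status)) {
                System.out.println(client.fetchResult(taskId)); // print the result set
            }
        }
    }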

3. Usage#

   ./bin/linkis-cli [client parameters] [engine parameters] [startup/runtime parameters]

4. Supported parameters#

• Client parameters

  Parameter | Description | Data type | Required
  --gatewayUrl | manually specify the address of the linkis-mg-gateway service | String |
  --authStg | specify the authentication strategy: token/static | String |
  --authKey | specify the authentication key | String |
  --authVal | specify the authentication value | String |
  --userConf | specify the configuration file location | String |
  --kill | specify the taskId to kill and run the task stop command | String |
  --log | specify the log path of the execution | String |
  --result | | String |
  --status | | String |
  --async | | String |
  --mode | ujes/once | String |

• Engine-related parameters

  Parameter | Description | Data type | Required
  -engineType | engine type for this task, with version number, e.g. hive-2.3.3 | String |
  -codeType | the linkis runType for this job, e.g. sql/hql/shell | String |
  -codePath | path of the local code file to execute | String |
  -code | code to execute | String |
  -scriptPath | remote path for the uploaded script | String |
  -submitUser | submitting user | String |
  -proxyUser | proxy user who executes your code on the Linkis server side | String |
  -creator | creator of this task, at the system level, e.g. IDE/LINKISCLI | String |
  -outPath | output path of the result set; if not specified, output goes to the screen (stdout) | String |

• Map startup/runtime parameters

  Parameter | Description | Data type | Required
  -confMap | startup parameters; any startup parameter can be put into this Map (e.g. spark.executor.instances). Input format: -confMap key1=value1 -confMap key2=value2 | String |
  -runtimeMap | runtime parameters; any runtime parameter can be put into this Map (e.g. jdbc.url=jdbc:hive2://xxx:10000). Input format: -runtimeMap key1=value1 -runtimeMap key2=value2 | String |
  -varMap | variable map; variables are used for keyword substitution, e.g. for the statement 'select from ${table_x} limit ${limit_y}', specify the substitutions via varMap: -varMap table_x=test -varMap limit_y=100 | String |
  -labelMap | label map | String |
  -jobContentMap | jobContent map | String |

5. Detailed examples#

5.1 Client parameters#

Client parameters can be passed in manually; values passed this way override conflicting items in the default configuration file linkis-cli.properties. They can also be set via a configuration file.

    ./bin/linkis-cli --gatewayUrl http://127.0.0.1:9001 --authStg token --authKey [tokenKey] --authVal [tokenValue] -engineType spark-2.4.3 -codeType sql -code "select count(*) from testdb.test;" -submitUser hadoop -proxyUser hadoop

5.2 Adding engine startup parameters#

Depending on your actual scenario, this can be left unconfigured if not needed.

The engine's initial parameters can be added via the -confMap parameter. Note that the data type of this parameter is Map; the command-line input format is as follows:

-confMap key1=val1 -confMap key2=val2 ...

For example, the following sets startup parameters such as the yarn queue for engine startup and the number of spark executors:

   ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=q02 -confMap spark.executor.instances=3 -code "select count(*) from testdb.test;" -submitUser hadoop -proxyUser hadoop

Of course, these parameters can also be read from a configuration file; see [5.6 Using a user configuration file].

5.3 Adding engine runtime parameters#

Depending on your actual scenario, this can be left unconfigured if not needed.

The engine's runtime parameters can be added via the -runtimeMap parameter. Note that the data type of this parameter is Map; the command-line input format is as follows:

-runtimeMap key1=val1 -runtimeMap key2=val2 ...

For example, the following sets parameters needed when the jdbc engine runs, such as the connection url, username, and password:

 sh ./bin/linkis-cli -submitUser hadoop -engineType jdbc-4 -codeType jdbc -code "show tables" -runtimeMap jdbc.url=jdbc:mysql://127.0.0.1:36000/hive_meta -runtimeMap jdbc.username=test -runtimeMap jdbc.password=test@123

Of course, these parameters can also be read from a configuration file; see [5.6 Using a user configuration file].

5.4 Adding labels#

Labels can be added via the -labelMap parameter; like -confMap, the -labelMap parameter is of type Map:

   ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -labelMap labelKey=labelVal -code "select count(*) from testdb.test;" -submitUser hadoop -proxyUser hadoop

5.5 Variable substitution#

Variable substitution in Linkis-cli is implemented jointly by the ${} notation and -varMap:

   ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from \${key};" -varMap key=testdb.test -submitUser hadoop -proxyUser hadoop

During execution the sql statement is replaced with:

   select count(*) from testdb.test

Note that the escape character in '\$' prevents the parameter from being parsed prematurely by linux; if the code is supplied as a local script via -codePath, the escape character is not needed.

5.6 Using a user configuration file#

1. linkis-cli supports loading a user-defined configuration file. The configuration file path is specified via the --userConf parameter, and the file must be in .properties format. By default the conf/linkis-cli/linkis-cli.properties configuration file is used.

   ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "select count(*) from testdb.test;" -submitUser hadoop -proxyUser hadoop --userConf [configuration file path]

2. Which parameters can be configured?

All parameters can be set in the configuration file, for example:

cli parameters:

   wds.linkis.client.common.gatewayUrl=http://127.0.0.1:9001
   wds.linkis.client.common.authStrategy=static
   wds.linkis.client.common.tokenKey=[static auth key]
   wds.linkis.client.common.tokenValue=[static auth value]

Parameters:

   wds.linkis.client.label.engineType=spark-2.4.3
   wds.linkis.client.label.codeType=sql

When Map-type parameters are configured, the key format is

    [Map prefix] + [key]

The prefix distinguishes the parameter type (startup parameters, runtime parameters, and so on).

Map prefixes include:

• executionMap prefix: wds.linkis.client.exec
• sourceMap prefix: wds.linkis.client.source
• confMap prefix: wds.linkis.client.param.conf
• runtimeMap prefix: wds.linkis.client.param.runtime
• labelMap prefix: wds.linkis.client.label

Note:

1. variableMap does not support configuration via the file.

2. When a configured key conflicts with a key already given on the command line, the priority is as follows:

  command parameters > keys inside command Map-type parameters > user configuration > default configuration

Example:

Configuring engine startup parameters:

   wds.linkis.client.param.conf.spark.executor.instances=3
   wds.linkis.client.param.conf.wds.linkis.yarnqueue=q02

Configuring labelMap parameters:

   wds.linkis.client.label.myLabel=label123

5.7 Outputting the result set to a file#

Use the -outPath parameter to specify an output directory; linkis-cli will write the result sets to files, creating one file per result set, named as follows:

    task-[taskId]-result-[idx].txt

For example:

    task-906-result-1.txt
    task-906-result-2.txt
    task-906-result-3.txt
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html b/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html index eb3a94091df..2356ac67663 100644 --- a/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/1.1.3/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/1.1.3/user_guide/udf/index.html b/zh-CN/docs/1.1.3/user_guide/udf/index.html index 0116b1b5567..0ba45d4fdcd 100644 --- a/zh-CN/docs/1.1.3/user_guide/udf/index.html +++ b/zh-CN/docs/1.1.3/user_guide/udf/index.html @@ -7,7 +7,7 @@ UDF 的使用 | Apache Linkis - + @@ -20,7 +20,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html index 61a655c2e85..6b93147aa66 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engine-plugin-api/index.html @@ -7,7 +7,7 @@ 引擎插件API | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html index 1d56608dc4c..350356fadd6 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-engineplugin-api/engineconn-plugin-refesh/index.html @@ -7,7 +7,7 @@ 引擎物料刷新API | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html index d3a42a92bb0..ead28f5aca4 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-management-api/index.html @@ -7,7 +7,7 @@ 任务管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html index f70b4b00b12..5171fe0a98d 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-entrance-api/task-operation-api/index.html @@ -7,7 +7,7 @@ 任务操作 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html index 1651301430b..e70c49df4ce 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ec-resource-management-api/index.html @@ -7,7 +7,7 @@ EC资源信息管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html index e216c1c2f2d..b8e0b45337b 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/ecm-resource-management-api/index.html @@ -7,7 +7,7 @@ ECM资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html index 68b72d514fe..3b69b91bb1b 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/engine-management-api/index.html @@ -7,7 +7,7 @@ 引擎管理 | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html index 9b075397aae..57eab7baffa 100644 --- a/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-cg-linkismanager-api/resource-management-api/index.html @@ -7,7 +7,7 @@ 资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html index 0c4c965cb6e..94b2ec4a3ab 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-history-service-api/index.html @@ -7,7 +7,7 @@ 上下文历史记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html index b727c34e2d9..7469258563d 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-listening-service-api/index.html @@ -7,7 +7,7 @@ 上下文监听服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html index 344920855d8..905c896cd8a 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-logging-service-api/index.html @@ -7,7 +7,7 @@ 上下文记录服务 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html index 43b9d7ffe66..ddc69f01e36 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-cs-api/context-service-api/index.html @@ -7,7 +7,7 @@ 上下文API | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html index 41a8787f62f..1f3e36626b1 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bm-operation-management-api/index.html @@ -7,7 +7,7 @@ BM项目操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html index 245ec6e68a8..a68ab0258e8 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bml-resource-management-api/index.html @@ -7,7 +7,7 @@ BML资源管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html index e9b112d6f60..bbb0f354cbe 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/bmlfs-management-api/index.html @@ -7,7 +7,7 @@ BMLFS管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html index d7724a77b50..50afe59132e 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/currency-api/index.html @@ -7,7 +7,7 @@ 通用API | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html index b5149ec39ee..92789781bf6 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/data-source-manager-api/index.html @@ -7,7 +7,7 @@ 数据源API | Apache Linkis - + @@ -20,7 +20,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html index 6d9c2dbe71b..fe616d1ec48 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/file-system-api/index.html @@ -7,7 +7,7 @@ 文件系统 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html index bf635dbd26b..d5c8ad32627 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/global-variable-api/index.html @@ -7,7 +7,7 @@ 添加全局变量 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html index c8332a88510..2a4bcd2017c 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/homepage-function-interface-api/index.html @@ -7,7 +7,7 @@ 管理台首页API | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html index 8dc377169a0..a7d3ddb8980 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/instance-management-api/index.html @@ -7,7 +7,7 @@ 实例管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html index c2c65e5e5d9..c31381ed6b0 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/jobhistory-api/index.html @@ -7,7 +7,7 @@ 历史作业API | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html index a670e43ba20..c161c705fc7 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/link-error-code/index.html @@ -7,7 +7,7 @@ Linkis错误代码 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html index 780bf92ade5..1ce31e89a9a 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/mdq-table-interface-api/index.html @@ -7,7 +7,7 @@ Mdq表API | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html index d08e2e87277..e77a8e461ec 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/metadatamanager-api/index.html @@ -7,7 +7,7 @@ 元数据查询API | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html index 5c4a8656aa1..91252e0e730 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/parameter-configuration-api/index.html @@ -7,7 +7,7 @@ 参数配置 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html index d310aaecaa3..1468c46708f 100644 --- a/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html +++ b/zh-CN/docs/latest/api/http/linkis-ps-publicservice-api/udf-api/index.html @@ -7,7 +7,7 @@ UDF操作管理 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/jdbc_api/index.html b/zh-CN/docs/latest/api/jdbc_api/index.html index beda1f4c845..59ca6f4e977 100644 --- a/zh-CN/docs/latest/api/jdbc_api/index.html +++ b/zh-CN/docs/latest/api/jdbc_api/index.html @@ -7,7 +7,7 @@ 任务提交执行 JDBC API 文档 | Apache Linkis - + @@ -22,7 +22,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/linkis_task_operator/index.html b/zh-CN/docs/latest/api/linkis_task_operator/index.html index 4387fc9392f..3205be9ad7e 100644 --- a/zh-CN/docs/latest/api/linkis_task_operator/index.html +++ b/zh-CN/docs/latest/api/linkis_task_operator/index.html @@ -7,7 +7,7 @@ 任务提交执行 Rest API 文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/login_api/index.html b/zh-CN/docs/latest/api/login_api/index.html index 523d5fce5aa..4cfd9cb8fb4 100644 --- a/zh-CN/docs/latest/api/login_api/index.html +++ b/zh-CN/docs/latest/api/login_api/index.html @@ -7,7 +7,7 @@ 登录文档 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/api/overview/index.html b/zh-CN/docs/latest/api/overview/index.html index a20e88a652c..54d00cc5532 100644 --- a/zh-CN/docs/latest/api/overview/index.html +++ b/zh-CN/docs/latest/api/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html b/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html index ce9205eaec3..5a503da866f 100644 --- a/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html +++ b/zh-CN/docs/latest/architecture/commons/message_scheduler/index.html @@ -7,7 +7,7 @@ Message Scheduler 模块 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/commons/rpc/index.html b/zh-CN/docs/latest/architecture/commons/rpc/index.html index 860a53343c3..f039091c9fb 100644 --- a/zh-CN/docs/latest/architecture/commons/rpc/index.html +++ b/zh-CN/docs/latest/architecture/commons/rpc/index.html @@ -7,7 +7,7 @@ RPC 模块 | Apache Linkis - + @@ -35,7 +35,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html index 420c801326c..29c4c0f9a13 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/add_an_engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn 启动流程 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html index 4c3e8602b97..c1ac548acc5 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn/index.html @@ -7,7 +7,7 @@ EngineConn架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html index 6e71a0f4b9b..65cbda5a0dc 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_manager/index.html @@ -7,7 +7,7 @@ EngineConnManager架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html index 391241d2ef5..93f39f01048 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/engine/engine_conn_plugin/index.html @@ -7,7 +7,7 @@ EngineConnPlugin(ECP)架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html index 7c8c7e97f65..f0c23a4b559 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/entrance/index.html @@ -7,7 +7,7 @@ Entrance 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html index aa1936d123d..546095a75b9 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/job_submission_preparation_and_execution_process/index.html @@ -7,7 +7,7 @@ Linkis任务执行流程 | Apache Linkis - + @@ -19,7 +19,7 @@
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html index 90ce28a88f7..824ad5b5204 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis-cli/index.html @@ -7,7 +7,7 @@ Linkis Client 架构设计 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html index 41197317a88..87ecd0a9e40 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/app_manager/index.html @@ -7,7 +7,7 @@ AppManager 架构 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html index fd03f9006e3..1f87538d389 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/label_manager/index.html @@ -7,7 +7,7 @@ LabelManager 架构 | Apache Linkis - + @@ -26,7 +26,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html index 30fe93041f3..d437cf10fef 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html index 2f5a267ffc8..1ace3d2b17f 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/linkis_manager/resource_manager/index.html @@ -7,7 +7,7 @@ ResourceManager 架构 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html index 78c9c5ad3a6..bcd6a541d59 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html b/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html index 46f8b81c576..0c182704f0f 100644 --- a/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html +++ b/zh-CN/docs/latest/architecture/computation_governance_services/proxy_user/index.html @@ -7,7 +7,7 @@ Proxy User Mode | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html b/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html index 47bee4be224..65baf0aab4b 100644 --- a/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html +++ b/zh-CN/docs/latest/architecture/difference_between_1.0_and_0.x/index.html @@ -7,7 +7,7 @@ Linkis1.0 与 Linkis0.X 的区别简述 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html b/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html index e62d3abf17d..34afa47568b 100644 --- a/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html +++ b/zh-CN/docs/latest/architecture/microservice_governance_services/gateway/index.html @@ -7,7 +7,7 @@ 网关 Gateway 架构 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html b/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html index 6c7ae253dac..260d0dd8a61 100644 --- a/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html +++ b/zh-CN/docs/latest/architecture/microservice_governance_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/overview/index.html b/zh-CN/docs/latest/architecture/overview/index.html index 8fc9959b102..6ca7ad0f3e1 100644 --- a/zh-CN/docs/latest/architecture/overview/index.html +++ b/zh-CN/docs/latest/architecture/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html index 239f8999342..07d124fb022 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/engine_bml_dissect/index.html @@ -7,7 +7,7 @@ BML 引擎物料管理功能剖析 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html index e2570625fdd..40f433ff49c 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/bml/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html index 45a3d226683..7d5df9242bb 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service/index.html @@ -7,7 +7,7 @@ CS 架构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html index 709f4722184..d41ac213999 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_cache/index.html @@ -7,7 +7,7 @@ CS Cache 架构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html index 7186fae4df9..0453ee53ae5 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_client/index.html @@ -7,7 +7,7 @@ CS Client | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html index 677cb47995e..f38281af91e 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_highavailable/index.html @@ -7,7 +7,7 @@ CS HA 架构设计 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html index 59883507785..4a929e82850 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_listener/index.html @@ -7,7 +7,7 @@ CS Listener 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html index cbdba9269e0..c169b4ffd6b 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_persistence/index.html @@ -7,7 +7,7 @@ CS Persistence 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html index 063126a2611..8ecd35872c4 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/context_service_search/index.html @@ -7,7 +7,7 @@ CS Search 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html index 2a1f8e4c216..110a0d3a645 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/context_service/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html index 248db341099..3e099d3e21b 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/datasource_manager/index.html @@ -7,7 +7,7 @@ DataSource Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html index ee9cb93490f..6f15c4bb732 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/metadata_manager/index.html @@ -7,7 +7,7 @@ MetaData Manager Server 架构 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html index 2dd65632c21..80d405d4650 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html b/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html index 36cff8342ea..80a5f6e856d 100644 --- a/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html +++ b/zh-CN/docs/latest/architecture/public_enhancement_services/public_service/index.html @@ -7,7 +7,7 @@ PublicService 公共服务架构 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/cluster_deployment/index.html b/zh-CN/docs/latest/deployment/cluster_deployment/index.html index 6de97d04066..005a7853d7a 100644 --- a/zh-CN/docs/latest/deployment/cluster_deployment/index.html +++ b/zh-CN/docs/latest/deployment/cluster_deployment/index.html @@ -7,7 +7,7 @@ 分布式部署 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html b/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html index 9bae4ad5f92..c276f0ae6b4 100644 --- a/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html +++ b/zh-CN/docs/latest/deployment/deploy_linkis_without_hdfs/index.html @@ -7,7 +7,7 @@ Linkis 去 HDFS 部署 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html b/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html index 9628b19181d..51ad4d80302 100644 --- a/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html +++ b/zh-CN/docs/latest/deployment/engine_conn_plugin_installation/index.html @@ -7,7 +7,7 @@ 引擎的安装 | Apache Linkis - + @@ -38,7 +38,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html b/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html index 0c0ac8f4401..2014e9adc6f 100644 --- a/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html +++ b/zh-CN/docs/latest/deployment/installation_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 部署后的目录结构 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html b/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html index 729b4f00a35..4b96b06ae6f 100644 --- a/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html +++ b/zh-CN/docs/latest/deployment/involve_skywalking_into_linkis/index.html @@ -7,7 +7,7 @@ 开启 SkyWalking | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html b/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html index 98389a2e806..2bd32171029 100644 --- a/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html +++ b/zh-CN/docs/latest/deployment/linkis_scriptis_install/index.html @@ -7,7 +7,7 @@ 工具 scriptis 的安装部署 | Apache Linkis - + @@ -29,7 +29,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/quick_deploy/index.html b/zh-CN/docs/latest/deployment/quick_deploy/index.html index 24696db62a1..d0d52be1e73 100644 --- a/zh-CN/docs/latest/deployment/quick_deploy/index.html +++ b/zh-CN/docs/latest/deployment/quick_deploy/index.html @@ -7,7 +7,7 @@ 快速单机部署 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html b/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html index 25a7b1d76c6..4984e2d1ca8 100644 --- a/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html +++ b/zh-CN/docs/latest/deployment/sourcecode_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 源码目录结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/start_metadatasource/index.html b/zh-CN/docs/latest/deployment/start_metadatasource/index.html index 148bfdbfaaa..6ce8891741e 100644 --- a/zh-CN/docs/latest/deployment/start_metadatasource/index.html +++ b/zh-CN/docs/latest/deployment/start_metadatasource/index.html @@ -7,7 +7,7 @@ 数据源功能使用 | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html b/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html index 9065b3c9162..79210988b61 100644 --- a/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html +++ b/zh-CN/docs/latest/deployment/unpack_hierarchical_structure/index.html @@ -7,7 +7,7 @@ 安装包目录结构 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/deployment/web_install/index.html b/zh-CN/docs/latest/deployment/web_install/index.html index bad4f85eb8e..e4eb11c29ae 100644 --- a/zh-CN/docs/latest/deployment/web_install/index.html +++ b/zh-CN/docs/latest/deployment/web_install/index.html @@ -7,7 +7,7 @@ 管理台部署 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_compile_and_package/index.html b/zh-CN/docs/latest/development/linkis_compile_and_package/index.html index 51d9468e537..c254f6923f1 100644 --- a/zh-CN/docs/latest/development/linkis_compile_and_package/index.html +++ b/zh-CN/docs/latest/development/linkis_compile_and_package/index.html @@ -7,7 +7,7 @@ Linkis 后端编译打包 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_config/index.html b/zh-CN/docs/latest/development/linkis_config/index.html index c54660436be..c6e51e04e10 100644 --- a/zh-CN/docs/latest/development/linkis_config/index.html +++ b/zh-CN/docs/latest/development/linkis_config/index.html @@ -7,7 +7,7 @@ Linkis 配置参数介绍 | Apache Linkis - + @@ -31,7 +31,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_debug/index.html b/zh-CN/docs/latest/development/linkis_debug/index.html index 3f3ef576486..63dd221db88 100644 --- a/zh-CN/docs/latest/development/linkis_debug/index.html +++ b/zh-CN/docs/latest/development/linkis_debug/index.html @@ -7,7 +7,7 @@ 服务调试指引 | Apache Linkis - + @@ -52,7 +52,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html b/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html index 558732adc3e..11b7461b776 100644 --- a/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html +++ b/zh-CN/docs/latest/development/linkis_debug_in_mac/index.html @@ -7,7 +7,7 @@ 在Mac上调试Linkis | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/new_engine_conn/index.html b/zh-CN/docs/latest/development/new_engine_conn/index.html index 9f64539334a..cb7cf7e98d6 100644 --- a/zh-CN/docs/latest/development/new_engine_conn/index.html +++ b/zh-CN/docs/latest/development/new_engine_conn/index.html @@ -7,7 +7,7 @@ 如何实现一个新引擎 | Apache Linkis - + @@ -54,7 +54,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/development/web_build/index.html b/zh-CN/docs/latest/development/web_build/index.html index 0c8029da948..06d92795122 100644 --- a/zh-CN/docs/latest/development/web_build/index.html +++ b/zh-CN/docs/latest/development/web_build/index.html @@ -7,7 +7,7 @@ Linkis 管理台编译 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/flink/index.html b/zh-CN/docs/latest/engine_usage/flink/index.html index 48af631b598..527ae3c59ae 100644 --- a/zh-CN/docs/latest/engine_usage/flink/index.html +++ b/zh-CN/docs/latest/engine_usage/flink/index.html @@ -7,7 +7,7 @@ Flink 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/hive/index.html b/zh-CN/docs/latest/engine_usage/hive/index.html index 90d66a0396a..fe440a94e90 100644 --- a/zh-CN/docs/latest/engine_usage/hive/index.html +++ b/zh-CN/docs/latest/engine_usage/hive/index.html @@ -7,7 +7,7 @@ Hive 引擎 | Apache Linkis - + @@ -28,7 +28,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/jdbc/index.html b/zh-CN/docs/latest/engine_usage/jdbc/index.html index fe418c808ce..dccdb9c04b2 100644 --- a/zh-CN/docs/latest/engine_usage/jdbc/index.html +++ b/zh-CN/docs/latest/engine_usage/jdbc/index.html @@ -7,7 +7,7 @@ JDBC 引擎 | Apache Linkis - + @@ -22,7 +22,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/openlookeng/index.html b/zh-CN/docs/latest/engine_usage/openlookeng/index.html index bbd8b1303bc..cf268d3d43a 100644 --- a/zh-CN/docs/latest/engine_usage/openlookeng/index.html +++ b/zh-CN/docs/latest/engine_usage/openlookeng/index.html @@ -7,7 +7,7 @@ openLooKeng 引擎 | Apache Linkis - + @@ -23,7 +23,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/overview/index.html b/zh-CN/docs/latest/engine_usage/overview/index.html index 988b316a094..ef5ab1bc054 100644 --- a/zh-CN/docs/latest/engine_usage/overview/index.html +++ b/zh-CN/docs/latest/engine_usage/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/pipeline/index.html b/zh-CN/docs/latest/engine_usage/pipeline/index.html index c1d8029d387..5b15d402310 100644 --- a/zh-CN/docs/latest/engine_usage/pipeline/index.html +++ b/zh-CN/docs/latest/engine_usage/pipeline/index.html @@ -7,7 +7,7 @@ Pipeline 引擎 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/python/index.html b/zh-CN/docs/latest/engine_usage/python/index.html index 536f7d50223..77e34ca6187 100644 --- a/zh-CN/docs/latest/engine_usage/python/index.html +++ b/zh-CN/docs/latest/engine_usage/python/index.html @@ -7,21 +7,21 @@ Python 引擎 | Apache Linkis - +

    Version: 1.1.2

Python Engine

This article mainly introduces the configuration, deployment and usage of the Python engine in Linkis 1.X.

1. Environment configuration before using the Python engine

If you want to use the Python engine on your server, make sure the python executable directory is on the user's PATH and that the user has execution permission.

| Environment variable | Content | Remark |
| python | python execution environment | The anaconda python executor is recommended |

Table 1-1 Environment configuration checklist

2. Configuration and deployment of the Python engine

2.1 Python version selection and compilation

The Python engine supports both python2 and python3. You can switch between them with a simple configuration change; there is no need to recompile the Python engine.
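As a sketch of what that switch can look like (the python.version key, and passing it per task through -confMap, are assumptions based on the engine's console configuration and the linkis-cli usage shown below; verify the exact key against your deployment), a single task can be pinned to python3 without recompiling anything:

# hypothetical example: pin one task to python3 via a runtime parameter
sh ./bin/linkis-cli -engineType python-python2 -codeType python \
  -confMap python.version=python3 \
  -code "import sys; print(sys.version)" \
  -submitUser hadoop -proxyUser hadoop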

2.2 python engineConn deployment and loading

The default loading method can be used here; no extra steps are required.

3. Using the Python engine

Preparation

Before submitting python tasks on linkis, you only need to make sure that python is on your user's $PATH.
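A quick check like the following, run as the submitting user, is usually enough to confirm this (a plain shell sanity check, not a Linkis command):

# run as the user that will submit tasks
which python          # should print the python executable path
python --version      # should print the expected python version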

3.1 Using via the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. For Python tasks, you only need to modify the EngineConnType and CodeType parameters in the Demo:

Map<String, Object> labels = new HashMap<String, Object>();
labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "python-python2"); // required engineType label
labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
labels.put(LabelKeyConstant.CODE_TYPE_KEY, "python"); // required codeType

3.2 Submitting tasks via Linkis-cli

Since Linkis 1.0, tasks can be submitted via the cli. We only need to specify the corresponding EngineConn and CodeType label types. For Python:

    sh ./bin/linkis-cli -engineType python-python2 -codeType python -code "print(\"hello\")"  -submitUser hadoop -proxyUser hadoop
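If your deployment has a python3 engine version configured, the same command with the python3 label should work (the python-python3 label value is an assumption; use whatever engine versions are registered in your cluster):

sh ./bin/linkis-cli -engineType python-python3 -codeType python -code "print(\"hello\")" -submitUser hadoop -proxyUser hadoop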

For details, refer to the Linkis CLI Manual.

3.3 Using Scriptis

Using Scriptis is the simplest way: go directly into Scriptis, right-click a directory, create a new python script, write your python code, and click Execute.

Python's execution logic is based on Py4j: a python gateway process is started, and the Python engine submits the code to the python executor for execution.

Figure 3-1 Screenshot of python execution

4. User settings for the Python engine

In addition to the above engine configuration, users can also make custom settings, such as the python version and the modules that python needs to load.

Figure 4-1 User-defined configuration console for python

    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/shell/index.html b/zh-CN/docs/latest/engine_usage/shell/index.html index f30dfc507dd..6a3805e4119 100644 --- a/zh-CN/docs/latest/engine_usage/shell/index.html +++ b/zh-CN/docs/latest/engine_usage/shell/index.html @@ -7,7 +7,7 @@ Shell 引擎 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/spark/index.html b/zh-CN/docs/latest/engine_usage/spark/index.html index 902a6a9a0de..1f57db01326 100644 --- a/zh-CN/docs/latest/engine_usage/spark/index.html +++ b/zh-CN/docs/latest/engine_usage/spark/index.html @@ -7,7 +7,7 @@ Spark 引擎 | Apache Linkis - + @@ -15,7 +15,8 @@
    Version: 1.1.2

Spark Engine

This article mainly introduces the configuration, deployment and usage of the spark engine in Linkis 1.X.

1. Environment configuration before using the Spark engine

If you want to use the spark engine on your server, make sure the following environment variables are set correctly and that the engine's startup user has them.

It is strongly recommended that you check these environment variables for the execution user before running spark tasks.

| Environment variable | Content | Remark |
| JAVA_HOME | JDK installation path | Required |
| HADOOP_HOME | Hadoop installation path | Required |
| HADOOP_CONF_DIR | Hadoop configuration path | Required |
| HIVE_CONF_DIR | Hive configuration path | Required |
| SPARK_HOME | Spark installation path | Required |
| SPARK_CONF_DIR | Spark configuration path | Required |
| python | python | Anaconda's python is recommended as the default python |

Table 1-1 Environment configuration checklist
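A small script like the one below (a generic bash sanity check, not part of Linkis) can be run as the execution user to verify the variables in Table 1-1 before submitting tasks:

# print each required variable, flagging any that are unset
for v in JAVA_HOME HADOOP_HOME HADOOP_CONF_DIR HIVE_CONF_DIR SPARK_HOME SPARK_CONF_DIR; do
  echo "$v=${!v:-<UNSET>}"
done
which python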

2. Configuration and deployment of the Spark engine

2.1 spark version selection and compilation

Note: a full compilation of the Linkis project is required before compiling the spark engine. In theory, Linkis 1.X supports all versions of spark 2.x and above; the default supported version is Spark 2.4.3. If you want to use another spark version, such as spark 2.1.0, you only need to change the plugin's spark version and recompile. Specifically, find the linkis-engineplugin-spark module, change the value of the "spark.version" tag in the maven dependency to 2.1.0, and compile this module separately.
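As a sketch of that workflow (the module path is an assumption based on the 1.x source layout and may differ in your checkout):

# full compilation of the Linkis project first (required before building a single engine plugin)
mvn -N install
mvn clean install -DskipTests

# then edit the <spark.version> value in the spark plugin's pom.xml to 2.1.0
# and rebuild only that module
cd linkis-engineconn-plugins/engineconn-plugins/spark   # path may differ between versions
mvn clean install -DskipTests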

2.2 spark engineConn deployment and loading

If you have already compiled your spark engine plugin, you need to place the new plugin in the specified location before it can be loaded. For details, refer to:

EngineConnPlugin engine plugin installation

2.3 Labels of the spark engine

Linkis 1.X distinguishes engine versions through label configuration, so we need to insert data into the database. See below for how to insert it:

EngineConnPlugin engine plugin installation > 2.2 Console Configuration modification (optional)

3. Using the spark engine

Preparation: queue setting

Because spark execution requires queue resources, users must set a queue they are allowed to use before executing tasks.

Figure 3-1 Queue settings

You can also add the queue value in the StartUpMap of the submission parameters: startupMap.put("wds.linkis.rm.yarnqueue", "dws")

3.1 Using via the Linkis SDK

Linkis provides Java and Scala SDKs to submit tasks to the Linkis server. For details, refer to the JAVA SDK Manual. For Spark tasks, you only need to modify the EngineConnType and CodeType parameters in the Demo:


Map<String, Object> labels = new HashMap<String, Object>();
labels.put(LabelKeyConstant.ENGINE_TYPE_KEY, "spark-2.4.3"); // required engineType label
labels.put(LabelKeyConstant.USER_CREATOR_TYPE_KEY, "hadoop-IDE"); // required execute user and creator
labels.put(LabelKeyConstant.CODE_TYPE_KEY, "sql"); // required codeType: py, sql, scala

3.2 Submitting tasks via Linkis-cli

Since Linkis 1.0, tasks can be submitted via the cli. We only need to specify the corresponding EngineConn and CodeType label types. For Spark:

# codeType mapping: py --> pyspark, sql --> sparkSQL, scala --> Spark scala
sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -code "show tables" -submitUser hadoop -proxyUser hadoop

# the yarn queue can be specified in the submission parameters via -confMap wds.linkis.yarnqueue=dws
sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap wds.linkis.yarnqueue=dws -code "show tables" -submitUser hadoop -proxyUser hadoop

For details, refer to the Linkis CLI Manual.

3.3 Using Scriptis

Using Scriptis is the simplest way: go directly into Scriptis and create a new sql, scala or pyspark script to execute.

The sql way is the simplest: create a new sql script, write your sql and execute it; progress is displayed during execution. If the user does not have a spark engine yet, executing sql will start a spark session (which may take some time); once the SparkSession is initialized, the sql starts executing.

Figure 3-2 Screenshot of sparksql execution

For spark-scala tasks, variables such as sqlContext are already initialized, and users can execute sql directly with this sqlContext.

Figure 3-3 spark-scala execution

Similarly, in the pyspark way, the SparkSession is already initialized, and users can execute sql directly via spark.sql.

Figure 3-4 pyspark execution

4. User settings for the spark engine

In addition to the above engine configuration, users can also make custom settings, such as the number of executors in the spark session and the executor memory. These parameters let users set their own spark parameters more freely; other spark parameters can also be modified, such as the python version for pyspark.
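As a sketch (whether these spark keys pass through -confMap to the session is an assumption based on the yarn-queue example in section 3.2; verify against your deployment), a per-task executor-memory override could look like:

sh ./bin/linkis-cli -engineType spark-2.4.3 -codeType sql -confMap spark.executor.memory=4g -code "show tables" -submitUser hadoop -proxyUser hadoop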

Figure 4-1 User-defined configuration console for spark

    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/engine_usage/sqoop/index.html b/zh-CN/docs/latest/engine_usage/sqoop/index.html index 4c39131cf73..29aaf794c73 100644 --- a/zh-CN/docs/latest/engine_usage/sqoop/index.html +++ b/zh-CN/docs/latest/engine_usage/sqoop/index.html @@ -7,7 +7,7 @@ Sqoop 引擎 | Apache Linkis - + @@ -32,7 +32,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/introduction/index.html b/zh-CN/docs/latest/introduction/index.html index 9142f4a145f..7a594531c00 100644 --- a/zh-CN/docs/latest/introduction/index.html +++ b/zh-CN/docs/latest/introduction/index.html @@ -7,7 +7,7 @@ Linkis 简述 | Apache Linkis - + @@ -24,7 +24,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/release/index.html b/zh-CN/docs/latest/release/index.html index 7294d68a982..0f9f8bea08d 100644 --- a/zh-CN/docs/latest/release/index.html +++ b/zh-CN/docs/latest/release/index.html @@ -7,7 +7,7 @@ 版本总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/table/udf-table/index.html b/zh-CN/docs/latest/table/udf-table/index.html index 37fad9b6403..04d508139c8 100644 --- a/zh-CN/docs/latest/table/udf-table/index.html +++ b/zh-CN/docs/latest/table/udf-table/index.html @@ -7,7 +7,7 @@ UDF 的表结构 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tags/index.html b/zh-CN/docs/latest/tags/index.html index 34afa6c16b8..6cfe87c0df3 100644 --- a/zh-CN/docs/latest/tags/index.html +++ b/zh-CN/docs/latest/tags/index.html @@ -7,7 +7,7 @@ Tags | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html b/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html index 449034ecbac..f023ae59b5e 100644 --- a/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html +++ b/zh-CN/docs/latest/tuning_and_troubleshooting/configuration/index.html @@ -7,7 +7,7 @@ 参数列表 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html b/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html index ceebf50556f..2b653f301fe 100644 --- a/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html +++ b/zh-CN/docs/latest/tuning_and_troubleshooting/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -33,7 +33,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html b/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html index 83c72d39876..1912510c6ee 100644 --- a/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html +++ b/zh-CN/docs/latest/tuning_and_troubleshooting/tuning/index.html @@ -7,7 +7,7 @@ 调优手册 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html b/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html index ce31fef0b2b..edea4124698 100644 --- a/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html +++ b/zh-CN/docs/latest/upgrade/upgrade_from_0.X_to_1.0_guide/index.html @@ -7,7 +7,7 @@ 0.x到1.0的升级指南 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/upgrade/upgrade_guide/index.html b/zh-CN/docs/latest/upgrade/upgrade_guide/index.html index 0a5ddc484a9..e216f9043be 100644 --- a/zh-CN/docs/latest/upgrade/upgrade_guide/index.html +++ b/zh-CN/docs/latest/upgrade/upgrade_guide/index.html @@ -7,7 +7,7 @@ 1.0.3以上的版本升级 | Apache Linkis - + @@ -36,7 +36,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/console_manual/index.html b/zh-CN/docs/latest/user_guide/console_manual/index.html index 294eb803ee7..1218433cf51 100644 --- a/zh-CN/docs/latest/user_guide/console_manual/index.html +++ b/zh-CN/docs/latest/user_guide/console_manual/index.html @@ -7,7 +7,7 @@ Linkis 管理台的使用 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/how_to_use/index.html b/zh-CN/docs/latest/user_guide/how_to_use/index.html index 9767e96731d..0e11008bc7f 100644 --- a/zh-CN/docs/latest/user_guide/how_to_use/index.html +++ b/zh-CN/docs/latest/user_guide/how_to_use/index.html @@ -7,7 +7,7 @@ 如何使用 Linkis1.0 | Apache Linkis - + @@ -21,7 +21,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html b/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html index 05d8c7e7e64..2a20769ab6e 100644 --- a/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html +++ b/zh-CN/docs/latest/user_guide/linkis-datasource-client/index.html @@ -7,7 +7,7 @@ DataSource Client SDK 的使用 | Apache Linkis - + @@ -34,7 +34,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html b/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html index 81ad7d501e6..e4ebe197814 100644 --- a/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html +++ b/zh-CN/docs/latest/user_guide/linkiscli_manual/index.html @@ -7,7 +7,7 @@ Linkis-Cli 方式使用 | Apache Linkis - + @@ -19,7 +19,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/overview/index.html b/zh-CN/docs/latest/user_guide/overview/index.html index 60637f84300..475a154faf6 100644 --- a/zh-CN/docs/latest/user_guide/overview/index.html +++ b/zh-CN/docs/latest/user_guide/overview/index.html @@ -7,7 +7,7 @@ 总览 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/sdk_manual/index.html b/zh-CN/docs/latest/user_guide/sdk_manual/index.html index 4a2db23121e..38a296e46c8 100644 --- a/zh-CN/docs/latest/user_guide/sdk_manual/index.html +++ b/zh-CN/docs/latest/user_guide/sdk_manual/index.html @@ -7,7 +7,7 @@ JAVA SDK 方式使用 | Apache Linkis - + @@ -45,7 +45,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/docs/latest/user_guide/udf/index.html b/zh-CN/docs/latest/user_guide/udf/index.html index 26143016888..15f69cc7d2e 100644 --- a/zh-CN/docs/latest/user_guide/udf/index.html +++ b/zh-CN/docs/latest/user_guide/udf/index.html @@ -7,7 +7,7 @@ UDF 的使用 | Apache Linkis - + @@ -20,7 +20,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/download-logo/index.html b/zh-CN/download/download-logo/index.html index 46bb4da6251..5db2abd1a4e 100644 --- a/zh-CN/download/download-logo/index.html +++ b/zh-CN/download/download-logo/index.html @@ -7,7 +7,7 @@ 下载Logo | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/main/index.html b/zh-CN/download/main/index.html index 0d285700711..07bf630b797 100644 --- a/zh-CN/download/main/index.html +++ b/zh-CN/download/main/index.html @@ -7,7 +7,7 @@ 版本列表 | Apache Linkis - + @@ -25,7 +25,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.0.2/index.html b/zh-CN/download/release-notes-1.0.2/index.html index e86f1d2338c..0bf0647480a 100644 --- a/zh-CN/download/release-notes-1.0.2/index.html +++ b/zh-CN/download/release-notes-1.0.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.2 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.0.3/index.html b/zh-CN/download/release-notes-1.0.3/index.html index 7827b3e830f..449f07b1a88 100644 --- a/zh-CN/download/release-notes-1.0.3/index.html +++ b/zh-CN/download/release-notes-1.0.3/index.html @@ -7,7 +7,7 @@ Release Notes 1.0.3 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.1.0/index.html b/zh-CN/download/release-notes-1.1.0/index.html index f9d115d6cb7..288fa65e4ea 100644 --- a/zh-CN/download/release-notes-1.1.0/index.html +++ b/zh-CN/download/release-notes-1.1.0/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.0 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.1.1/index.html b/zh-CN/download/release-notes-1.1.1/index.html index 19eed83e1ba..3cd81c75442 100644 --- a/zh-CN/download/release-notes-1.1.1/index.html +++ b/zh-CN/download/release-notes-1.1.1/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.1 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/download/release-notes-1.1.2/index.html b/zh-CN/download/release-notes-1.1.2/index.html index 9eed36444a4..f5eb917cdbe 100644 --- a/zh-CN/download/release-notes-1.1.2/index.html +++ b/zh-CN/download/release-notes-1.1.2/index.html @@ -7,7 +7,7 @@ Release Notes 1.1.2 | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/faq/main/index.html b/zh-CN/faq/main/index.html index 9baa330a46c..9b2b100b2d0 100644 --- a/zh-CN/faq/main/index.html +++ b/zh-CN/faq/main/index.html @@ -7,7 +7,7 @@ Q&A | Apache Linkis - + @@ -75,7 +75,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/home/index.html b/zh-CN/home/index.html index 4644c474ca8..d63df127b5c 100644 --- a/zh-CN/home/index.html +++ b/zh-CN/home/index.html @@ -7,14 +7,14 @@ - +

    Computation Middleware

    Before

Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way

    before

    Description

    Standardized Interfaces

    Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.

    description

    Computation Governance

    Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these "computation governance" affairs in a standardized reusable way.

    Core Features

    Connectivity

Simplify the operation environment; decouple the upper and lower layers, so that the upper layer is insensitive to changes in the bottom layers

    Scalability

Distributed microservice architecture with great scalability and extensibility; quickly integrate new underlying engines

    Controllability

Converge engine entrances; unify identity verification, high-risk prevention and control, and audit records; provide label-based, multi-level, fine-grained resource control and recovery capabilities

    Orchestration

Computing strategy design based on active-active, mixed computing, and transaction Orchestrator Service

    Reusability

Greatly reduces the back-end development workload of upper-level application development; swiftly and efficiently build a data platform tool suite based on Linkis

    - + \ No newline at end of file diff --git a/zh-CN/index.html b/zh-CN/index.html index 004f2289183..667203afecd 100644 --- a/zh-CN/index.html +++ b/zh-CN/index.html @@ -7,19 +7,19 @@ Apache Linkis | Apache Linkis - +

    Computation Middleware

    Before

Each upper application directly connects to and accesses various underlying engines in a tightly coupled way, which makes the big data platform a complex network architecture.

    before

    After

    Build a common layer of "computation middleware" between the numerous upper-layer applications and the countless underlying engines to resolve these complex connection problems in a standardized reusable way

    before

    Description

    Standardized Interfaces

    Linkis provides standardized interfaces (REST, JDBC, WebSocket etc.) to easily connect to various underlying engines (Spark, Presto, Flink, etc.), and acts as a proxy between the upper applications layer and underlying engines layer.

    description

    Computation Governance

    Linkis is able to facilitate the connectivity, governance and orchestration capabilities of different kind of engines like OLAP, OLTP (developing), Streaming, and handle all these "computation governance" affairs in a standardized reusable way.

    Core Features

    Connectivity

Simplify the operation environment; decouple the upper and lower layers, so that the upper layer is insensitive to changes in the bottom layers

    Scalability

Distributed microservice architecture with great scalability and extensibility; quickly integrate new underlying engines

    Controllability

Converge engine entrances; unify identity verification, high-risk prevention and control, and audit records; provide label-based, multi-level, fine-grained resource control and recovery capabilities

    Orchestration

Computing strategy design based on active-active, mixed computing, and transaction Orchestrator Service

    Reusability

Greatly reduces the back-end development workload of upper-level application development; swiftly and efficiently build a data platform tool suite based on Linkis

    - + \ No newline at end of file diff --git a/zh-CN/search/index.html b/zh-CN/search/index.html index c37e3cd53b6..d1494ae58a8 100644 --- a/zh-CN/search/index.html +++ b/zh-CN/search/index.html @@ -7,7 +7,7 @@ Search the documentation | Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/team/index.html b/zh-CN/team/index.html index 6397b94cb98..044070a015c 100644 --- a/zh-CN/team/index.html +++ b/zh-CN/team/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/user/index.html b/zh-CN/user/index.html index 4194f714163..2031426b981 100644 --- a/zh-CN/user/index.html +++ b/zh-CN/user/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file diff --git a/zh-CN/versions/index.html b/zh-CN/versions/index.html index 96dc5bb0f17..b96e253ead9 100644 --- a/zh-CN/versions/index.html +++ b/zh-CN/versions/index.html @@ -7,7 +7,7 @@ Apache Linkis - + @@ -18,7 +18,7 @@
    Copyright © 2019-2020 The Apache Software Foundation. Apache Linkis, Linkis, and its feather logo are trademarks of The Apache Software Foundation.
    - + \ No newline at end of file