@@ -63,6 +71,8 @@
import mResources from './_source/resources'
import mLocalParams from './_source/localParams'
import disabledState from '@/module/mixin/disabledState'
+ import Treeselect from '@riophae/vue-treeselect'
+ import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import codemirror from '@/conf/home/pages/resource/pages/file/pages/_source/codemirror'
let editor
@@ -78,7 +88,14 @@
// resource(list)
resourceList: [],
// Cache ResourceList
- cacheResourceList: []
+ cacheResourceList: [],
+ // define options
+ options: [],
+ normalizer(node) {
+ return {
+ label: node.name
+ }
+ },
}
},
mixins: [disabledState],
@@ -143,17 +160,19 @@
return false
}
- if (!this.$refs.refResources._verifResources()) {
- return false
- }
-
// localParams Subcomponent verification
if (!this.$refs.refLocalParams._verifProp()) {
return false
}
+ // Process resourcelist
+ let dataProcessing= _.map(this.resourceList, v => {
+ return {
+ id: v
+ }
+ })
// storage
this.$emit('on-params', {
- resourceList: this.resourceList,
+ resourceList: dataProcessing,
localParams: this.localParams,
rawScript: editor.getValue()
})
@@ -163,8 +182,6 @@
* Processing code highlighting
*/
_handlerEditor () {
- this._destroyEditor()
-
// editor
editor = codemirror('code-shell-mirror', {
mode: 'shell',
@@ -179,51 +196,41 @@
}
}
- this.changes = () => {
- this._cacheParams()
- }
-
// Monitor keyboard
editor.on('keypress', this.keypress)
-
- editor.on('changes', this.changes)
-
editor.setValue(this.rawScript)
return editor
},
- _cacheParams () {
- this.$emit('on-cache-params', {
- resourceList: this.cacheResourceList,
- localParams: this.localParams,
- rawScript: editor ? editor.getValue() : ''
- });
- },
- _destroyEditor () {
- if (editor) {
- editor.toTextArea() // Uninstall
- editor.off($('.code-sql-mirror'), 'keypress', this.keypress)
- editor.off($('.code-sql-mirror'), 'changes', this.changes)
- }
+ diGuiTree(item) { // Recursive convenience tree structure
+ item.forEach(item => {
+ item.children === '' || item.children === undefined || item.children === null || item.children.length === 0?
+ delete item.children : this.diGuiTree(item.children);
+ })
}
},
watch: {
//Watch the cacheParams
cacheParams (val) {
- this._cacheParams()
+ this.$emit('on-cache-params', val);
}
},
computed: {
cacheParams () {
return {
- resourceList: this.cacheResourceList,
- localParams: this.localParams
+ resourceList: _.map(this.resourceList, v => {
+ return {id: v}
+ }),
+ localParams: this.localParams,
+ rawScript: editor ? editor.getValue() : ''
}
}
},
created () {
+ let item = this.store.state.dag.resourcesListS
+ this.diGuiTree(item)
+ this.options = item
let o = this.backfillItem
-
// Non-null objects represent backfill
if (!_.isEmpty(o)) {
this.rawScript = o.params.rawScript || ''
@@ -231,7 +238,9 @@
// backfill resourceList
let resourceList = o.params.resourceList || []
if (resourceList.length) {
- this.resourceList = resourceList
+ this.resourceList = _.map(resourceList, v => {
+ return v.id
+ })
this.cacheResourceList = resourceList
}
@@ -251,10 +260,9 @@
if (editor) {
editor.toTextArea() // Uninstall
editor.off($('.code-shell-mirror'), 'keypress', this.keypress)
- editor.off($('.code-shell-mirror'), 'changes', this.changes)
}
},
- components: { mLocalParams, mListBox, mResources, mScriptBox }
+ components: { mLocalParams, mListBox, mResources, mScriptBox, Treeselect }
}
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/createUdfFolder/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/createUdfFolder/index.vue
new file mode 100755
index 0000000000..2511452269
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/createUdfFolder/index.vue
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+ * {{$t('Folder Name')}}
+
+
+
+
+
+
+ {{$t('Description')}}
+
+
+
+
+
+
+
+
+
+ {{spinnerLoading ? 'Loading...' : $t('Create')}}
+ $router.push({name: 'resource-udf'})"> {{$t('Cancel')}}
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/details/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/details/index.vue
index e961d8b1ee..6875cd4b2e 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/details/index.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/details/index.vue
@@ -21,7 +21,7 @@
{{name}}
-
+
{{size}}
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/edit/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/edit/index.vue
index a0d1d7d187..0290af0988 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/edit/index.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/edit/index.vue
@@ -44,8 +44,8 @@
+
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subFileFolder/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subFileFolder/index.vue
new file mode 100755
index 0000000000..9f903a127b
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subFileFolder/index.vue
@@ -0,0 +1,144 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+ * {{$t('Folder Name')}}
+
+
+
+
+
+
+
+ {{$t('Description')}}
+
+
+
+
+
+
+
+
+
+ {{spinnerLoading ? 'Loading...' : $t('Create')}}
+ $router.push({name: 'file'})"> {{$t('Cancel')}}
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/list.vue
new file mode 100755
index 0000000000..f5e801a205
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/list.vue
@@ -0,0 +1,251 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+
+ {{$t('#')}}
+
+
+ {{$t('Name')}}
+
+
+ {{$t('Whether directory')}}
+
+
+ {{$t('File Name')}}
+
+
+ {{$t('Description')}}
+
+
+ {{$t('Size')}}
+
+
+ {{$t('Update Time')}}
+
+
+ {{$t('Operation')}}
+
+
+
+
+ {{parseInt(pageNo === 1 ? ($index + 1) : (($index + 1) + (pageSize * (pageNo - 1))))}}
+
+
+
+ {{item.alias}}
+
+
+
+ {{item.directory? $t('Yes') : $t('No')}}
+
+ {{item.fileName}}
+
+ {{item.description}}
+ -
+
+
+ {{_rtSize(item.size)}}
+
+
+ {{item.updateTime | formatDate}}
+ -
+
+
+
+
+
+
+
+
+
+
+
+ {{$t('Delete?')}}
+
+ {{$t('Cancel')}}
+ {{$t('Confirm')}}
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/rename.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/rename.vue
new file mode 100755
index 0000000000..6f7dacae89
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/_source/rename.vue
@@ -0,0 +1,120 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+ * {{$t('Name')}}
+
+
+
+
+
+
+ {{$t('Description')}}
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue
new file mode 100755
index 0000000000..12be6b0bc8
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/subdirectory/index.vue
@@ -0,0 +1,173 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+
+
+ $router.push({path: `/resource/file/subFileFolder/${searchParams.id}`})">{{$t('Create folder')}}
+ $router.push({path: `/resource/file/subFile/${searchParams.id}`})">{{$t('Create File')}}
+ {{$t('Upload Files')}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/createUdfFolder/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/createUdfFolder/index.vue
new file mode 100755
index 0000000000..c707ce8c90
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/createUdfFolder/index.vue
@@ -0,0 +1,128 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+ * {{$t('Folder Name')}}
+
+
+
+
+
+
+ {{$t('Description')}}
+
+
+
+
+
+
+
+
+
+ {{spinnerLoading ? 'Loading...' : $t('Create')}}
+ $router.push({name: 'resource-udf'})"> {{$t('Cancel')}}
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/createUdf.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/createUdf.vue
index 01d8d22650..1408c552db 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/createUdf.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/createUdf.vue
@@ -15,7 +15,7 @@
* limitations under the License.
*/
-
+
@@ -72,26 +72,25 @@
* {{$t('UDF Resources')}}
-
-
-
-
+
+ {{ node.raw.fullName }}
+
{{$t('Upload Resources')}}
+
+ * {{$t('UDF resources directory')}}
+
+
+ {{ node.raw.fullName }}
+
+
+
@@ -115,6 +114,8 @@
import _ from 'lodash'
import i18n from '@/module/i18n'
import store from '@/conf/home/store'
+ import Treeselect from '@riophae/vue-treeselect'
+ import '@riophae/vue-treeselect/dist/vue-treeselect.css'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
import mUdfUpdate from '@/module/components/fileUpdate/udfUpdate'
@@ -130,10 +131,16 @@
argTypes: '',
database: '',
description: '',
- resourceId: '',
+ resourceId: null,
+ pid: null,
udfResourceList: [],
isUpdate: false,
- upDisabled: false
+ upDisabled: false,
+ normalizer(node) {
+ return {
+ label: node.name
+ }
+ },
}
},
props: {
@@ -192,17 +199,54 @@
// disabled update
this.upDisabled = true
},
+ // selTree
+ selTree(node) {
+ this.$refs.assignment.receivedValue(node.id,node.fullName)
+ },
/**
* get udf resources
*/
_getUdfList () {
return new Promise((resolve, reject) => {
this.store.dispatch('resource/getResourcesList', { type: 'UDF' }).then(res => {
- this.udfResourceList = res.data
+ let item = res.data
+ this.filterEmptyDirectory(item)
+ item = this.filterEmptyDirectory(item)
+ let item1 = _.cloneDeep(res.data)
+ this.diGuiTree(item)
+
+ this.diGuiTree(this.filterJarFile(item1))
+ this.udfResourceList = item
+ this.udfResourceDirList = item1
resolve()
})
})
},
+ // filterEmptyDirectory
+ filterEmptyDirectory(array) {
+ for (const item of array) {
+ if (item.children) {
+ this.filterEmptyDirectory(item.children)
+ }
+ }
+ return array.filter(n => ((/\.jar$/.test(n.name) && n.children.length==0) || (!/\.jar$/.test(n.name) && n.children.length>0)))
+ },
+ // filterJarFile
+ filterJarFile (array) {
+ for (const item of array) {
+ if (item.children) {
+ item.children = this.filterJarFile(item.children)
+ }
+ }
+ return array.filter(n => !/\.jar$/.test(n.name))
+ },
+ // diGuiTree
+ diGuiTree(item) { // Recursive convenience tree structure
+ item.forEach(item => {
+ item.children === '' || item.children === undefined || item.children === null || item.children.length === 0?
+ delete item.children : this.diGuiTree(item.children);
+ })
+ },
/**
* Upload udf resources
*/
@@ -257,8 +301,7 @@
})
}
},
- watch: {
- },
+ watch: {},
created () {
this._getUdfList().then(res => {
// edit
@@ -271,13 +314,18 @@
this.description = this.item.description || ''
this.resourceId = this.item.resourceId
} else {
- this.resourceId = this.udfResourceList.length && this.udfResourceList[0].id || ''
+ this.resourceId = null
}
})
},
mounted () {
},
- components: { mPopup, mListBoxF, mUdfUpdate }
+ components: { mPopup, mListBoxF, mUdfUpdate, Treeselect }
}
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/list.vue
index ed441baad0..d77f55722e 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/list.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/function/_source/list.vue
@@ -43,7 +43,7 @@ v-ps
-
+
{{$t('Update Time')}}
@@ -71,7 +71,8 @@ v-ps
-
- {{item.resourceName}}
+ {{item.resourceName}}
+ -
+
+
+ * {{$t('File Name')}}
+
+
+
+
+
+
+ {{$t('Description')}}
+
+
+
+
+
+
+ * {{$t('Upload Files')}}
+
+
+
+
+ {{$t('Upload')}}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue b/dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue
index b7cef4ecfb..5e06ddacce 100644
--- a/dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue
+++ b/dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue
@@ -107,6 +107,8 @@
progress: 0,
// file
file: '',
+ currentDir: '/',
+ pid: -1,
// Whether to drag upload
dragOver: false
}
@@ -124,7 +126,7 @@
this.$refs['popup'].spinnerLoading = true
if (this._validation()) {
this.store.dispatch('resource/resourceVerifyName', {
- name: this.name,
+ fullName: '/'+this.name,
type: this.type
}).then(res => {
const isLt1024M = this.file.size / 1024 / 1024 < 1024
@@ -172,6 +174,8 @@
formData.append('file', this.file)
formData.append('type', this.type)
formData.append('name', this.name)
+ formData.append('pid', this.pid)
+ formData.append('currentDir', this.currentDir)
formData.append('description', this.description)
io.post(`resources/create`, res => {
this.$message.success(res.msg)
diff --git a/dolphinscheduler-ui/src/js/module/components/fileUpdate/resourceChildUpdate.vue b/dolphinscheduler-ui/src/js/module/components/fileUpdate/resourceChildUpdate.vue
new file mode 100755
index 0000000000..05d12b15e1
--- /dev/null
+++ b/dolphinscheduler-ui/src/js/module/components/fileUpdate/resourceChildUpdate.vue
@@ -0,0 +1,318 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+
+
+
+
+
+
+
+
diff --git a/dolphinscheduler-ui/src/js/module/components/fileUpdate/udfUpdate.vue b/dolphinscheduler-ui/src/js/module/components/fileUpdate/udfUpdate.vue
index 4285df5720..7c852c5caa 100644
--- a/dolphinscheduler-ui/src/js/module/components/fileUpdate/udfUpdate.vue
+++ b/dolphinscheduler-ui/src/js/module/components/fileUpdate/udfUpdate.vue
@@ -25,7 +25,7 @@
size="small"
v-model="udfName"
:disabled="progress !== 0"
- style="width: 268px"
+ style="width: 535px"
:placeholder="$t('Please enter resource name')"
autocomplete="off">
@@ -66,7 +66,9 @@
udfDesc: '',
file: '',
progress: 0,
- spinnerLoading: false
+ spinnerLoading: false,
+ pid: null,
+ currentDir: ''
}
},
props: {
@@ -77,6 +79,10 @@
* validation
*/
_validation () {
+ if (!this.currentDir) {
+ this.$message.warning(`${i18n.$t('Please select UDF resources directory')}`)
+ return false
+ }
if (!this.udfName) {
this.$message.warning(`${i18n.$t('Please enter file name')}`)
return false
@@ -90,7 +96,7 @@
_verifyName () {
return new Promise((resolve, reject) => {
this.store.dispatch('resource/resourceVerifyName', {
- name: this.udfName,
+ fullName: '/'+this.udfName,
type: 'UDF'
}).then(res => {
resolve()
@@ -100,11 +106,17 @@
})
})
},
+ receivedValue(pid,name) {
+ this.pid = pid
+ this.currentDir = name
+ },
_formDataUpdate () {
let self = this
let formData = new FormData()
formData.append('file', this.file)
formData.append('type', 'UDF')
+ formData.append('pid', this.pid)
+ formData.append('currentDir', this.currentDir)
formData.append('name', this.udfName)
formData.append('description', this.udfDesc)
this.spinnerLoading = true
diff --git a/dolphinscheduler-ui/src/js/module/components/nav/nav.vue b/dolphinscheduler-ui/src/js/module/components/nav/nav.vue
index 7212f5146a..a46ff6fe74 100644
--- a/dolphinscheduler-ui/src/js/module/components/nav/nav.vue
+++ b/dolphinscheduler-ui/src/js/module/components/nav/nav.vue
@@ -155,6 +155,8 @@
import { mapState, mapActions } from 'vuex'
import { findComponentDownward } from '@/module/util/'
import mFileUpdate from '@/module/components/fileUpdate/fileUpdate'
+ import mFileChildUpdate from '@/module/components/fileUpdate/fileChildUpdate'
+ import mResourceChildUpdate from '@/module/components/fileUpdate/resourceChildUpdate'
import mDefinitionUpdate from '@/module/components/fileUpdate/definitionUpdate'
import mProgressBar from '@/module/components/progressBar/progressBar'
@@ -260,6 +262,86 @@
}
})
},
+ _fileChildUpdate (type,data) {
+ if (this.progress) {
+ this._toggleArchive()
+ return
+ }
+ let self = this
+ let modal = this.$modal.dialog({
+ closable: false,
+ showMask: true,
+ escClose: true,
+ className: 'update-file-modal',
+ transitionName: 'opacityp',
+ render (h) {
+ return h(mFileChildUpdate, {
+ on: {
+ onProgress (val) {
+ self.progress = val
+ },
+ onUpdate () {
+ findComponentDownward(self.$root, `resource-list-index-${type}`)._updateList(data)
+ self.isUpdate = false
+ self.progress = 0
+ modal.remove()
+ },
+ onArchive () {
+ self.isUpdate = true
+ },
+ close () {
+ self.progress = 0
+ modal.remove()
+ }
+ },
+ props: {
+ type: type,
+ id: data
+ }
+ })
+ }
+ })
+ },
+ _resourceChildUpdate (type,data) {
+ if (this.progress) {
+ this._toggleArchive()
+ return
+ }
+ let self = this
+ let modal = this.$modal.dialog({
+ closable: false,
+ showMask: true,
+ escClose: true,
+ className: 'update-file-modal',
+ transitionName: 'opacityp',
+ render (h) {
+ return h(mResourceChildUpdate, {
+ on: {
+ onProgress (val) {
+ self.progress = val
+ },
+ onUpdate () {
+ findComponentDownward(self.$root, `resource-list-index-${type}`)._updateList(data)
+ self.isUpdate = false
+ self.progress = 0
+ modal.remove()
+ },
+ onArchive () {
+ self.isUpdate = true
+ },
+ close () {
+ self.progress = 0
+ modal.remove()
+ }
+ },
+ props: {
+ type: type,
+ id: data
+ }
+ })
+ }
+ })
+ },
/**
* Upload popup layer display
*/
diff --git a/dolphinscheduler-ui/src/js/module/components/secondaryMenu/_source/menu.js b/dolphinscheduler-ui/src/js/module/components/secondaryMenu/_source/menu.js
index 2ed0fc1234..baf2cf7808 100644
--- a/dolphinscheduler-ui/src/js/module/components/secondaryMenu/_source/menu.js
+++ b/dolphinscheduler-ui/src/js/module/components/secondaryMenu/_source/menu.js
@@ -148,13 +148,13 @@ let menu = {
children: [
{
name: `${i18n.$t('Resource manage')}`,
- path: 'resource-udf-resource',
+ path: 'resource-udf',
id: 0,
disabled: true
},
{
name: `${i18n.$t('Function manage')}`,
- path: 'resource-udf-function',
+ path: 'resource-func',
id: 1,
disabled: true
}
diff --git a/dolphinscheduler-ui/src/js/module/components/transfer/resource.vue b/dolphinscheduler-ui/src/js/module/components/transfer/resource.vue
index 72205dec4d..97397c1809 100644
--- a/dolphinscheduler-ui/src/js/module/components/transfer/resource.vue
+++ b/dolphinscheduler-ui/src/js/module/components/transfer/resource.vue
@@ -20,26 +20,21 @@
- {{$t('File resources')}}
- {{$t('UDF resources')}}
+ {{$t('File resources')}}
+ {{$t('UDF resources')}}
-
+
+ {{ node.raw.fullName }}
+
+
+ {{ node.raw.fullName }}
+
+
-
-
+
-->
@@ -80,6 +64,9 @@
import _ from 'lodash'
import mPopup from '@/module/components/popup/popup'
import mListBoxF from '@/module/components/listBoxF/listBoxF'
+ import Treeselect from '@riophae/vue-treeselect'
+ import '@riophae/vue-treeselect/dist/vue-treeselect.css'
+
export default {
name: 'transfer',
@@ -92,11 +79,22 @@
cacheTargetList: this.fileTargetList,
fileSource: this.fileSourceList,
+ fileList: [],
+ udfList: [],
+ selectFileSource: [],
+ selectUdfSource: [],
fileTarget: this.fileTargetList,
udfSource: this.udfSourceList,
udfTarget: this.udfTargetList,
searchSourceVal: '',
- searchTargetVal: ''
+ searchTargetVal: '',
+ // define default value
+ value: null,
+ normalizer(node) {
+ return {
+ label: node.name
+ }
+ },
}
},
props: {
@@ -106,12 +104,22 @@
fileTargetList: Array,
udfTargetList: Array,
},
+ created() {
+ let file = this.fileSourceList
+ let udf = this.udfSourceList
+ this.diGuiTree(file)
+ this.diGuiTree(udf)
+ this.fileList = file
+ this.udfList = udf
+ this.selectFileSource = this.fileTargetList
+ this.selectUdfSource = this.udfTargetList
+ },
methods: {
_ok () {
this.$refs['popup'].spinnerLoading = true
setTimeout(() => {
this.$refs['popup'].spinnerLoading = false
- this.$emit('onUpdate', _.map(this.fileTarget.concat(this.udfTarget), v => v.id).join(','))
+ this.$emit('onUpdate', _.map(this.selectFileSource.concat(this.selectUdfSource), v => v).join(','))
}, 800)
},
_ckFile() {
@@ -169,6 +177,12 @@
this.udfSource = this.sourceList
this.udfTarget = this.targetList
}
+ },
+ diGuiTree(item) { // Recursive convenience tree structure
+ item.forEach(item => {
+ item.children === '' || item.children === undefined || item.children === null || item.children.length === 0?
+ delete item.children : this.diGuiTree(item.children);
+ })
}
},
watch: {
@@ -187,7 +201,7 @@
this._targetQuery()
}
},
- components: { mPopup, mListBoxF }
+ components: { mPopup, mListBoxF, Treeselect }
}
diff --git a/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js b/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
index 819af7c120..53a40af091 100755
--- a/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
+++ b/dolphinscheduler-ui/src/js/module/i18n/locale/en_US.js
@@ -138,6 +138,7 @@ export default {
'jdbc connect parameters': 'jdbc connect parameters',
'Test Connect': 'Test Connect',
'Please enter resource name': 'Please enter resource name',
+ 'Please enter resource folder name': 'Please enter resource folder name',
'Please enter a non-query SQL statement': 'Please enter a non-query SQL statement',
'Please enter IP/hostname': 'Please enter IP/hostname',
'jdbc connection parameters is not a correct JSON format': 'jdbc connection parameters is not a correct JSON format',
@@ -183,6 +184,8 @@ export default {
'Authorize': 'Authorize',
'File resources': 'File resources',
'UDF resources': 'UDF resources',
+ 'Please select UDF resources directory': 'Please select UDF resources directory',
+ 'UDF resources directory' : 'UDF resources directory',
'Upload File Size': 'Upload File size cannot exceed 1g',
'Edit alarm group': 'Edit alarm group',
'Create alarm group': 'Create alarm group',
@@ -226,8 +229,11 @@ export default {
'execution': 'execution',
'finish': 'finish',
'Create File': 'Create File',
+ 'Create folder': 'Create folder',
'File Name': 'File Name',
+ 'Folder Name': 'Folder Name',
'File Format': 'File Format',
+ 'Folder Format': 'Folder Format',
'File Content': 'File Content',
'Create': 'Create',
'Please enter the resource content': 'Please enter the resource content',
@@ -274,6 +280,9 @@ export default {
'Edit UDF Function': 'Edit UDF Function',
'type': 'type',
'UDF Function Name': 'UDF Function Name',
+ 'FILE': 'FILE',
+ 'UDF': 'UDF',
+ 'File Subdirectory': 'File Subdirectory',
'Please enter a function name': 'Please enter a function name',
'Package Name': 'Package Name',
'Please enter a Package name': 'Please enter a Package name',
@@ -523,6 +532,10 @@ export default {
'0 means unlimited by byte': '0 means unlimited',
'0 means unlimited by count': '0 means unlimited',
'Modify User': 'Modify User',
+ 'Whether directory': 'Whether directory',
+ 'Yes': 'Yes',
+ 'No': 'No',
+ 'Modify User': 'Modify User',
'Please enter Mysql Database(required)': 'Please enter Mysql Database(required)',
'Please enter Mysql Table(required)': 'Please enter Mysql Table(required)',
'Please enter Columns (Comma separated)': 'Please enter Columns (Comma separated)',
diff --git a/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js b/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
index fe67bae375..9b78f36ed3 100755
--- a/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
+++ b/dolphinscheduler-ui/src/js/module/i18n/locale/zh_CN.js
@@ -120,6 +120,9 @@ export default {
'SQL Parameter': 'sql参数',
'SQL Statement': 'sql语句',
'UDF Function': 'UDF函数',
+ 'FILE': '文件',
+ 'UDF': 'UDF',
+ 'File Subdirectory': '文件子目录',
'Please enter a SQL Statement(required)': '请输入sql语句(必填)',
'Please enter a JSON Statement(required)': '请输入json语句(必填)',
'One form or attachment must be selected': '表格、附件必须勾选一个',
@@ -139,6 +142,7 @@ export default {
'jdbc connect parameters': 'jdbc连接参数',
'Test Connect': '测试连接',
'Please enter resource name': '请输入数据源名称',
+ 'Please enter resource folder name': '请输入资源文件夹名称',
'Please enter a non-query SQL statement': '请输入非查询sql语句',
'Please enter IP/hostname': '请输入IP/主机名',
'jdbc connection parameters is not a correct JSON format': 'jdbc连接参数不是一个正确的JSON格式',
@@ -182,6 +186,8 @@ export default {
'Authorize': '授权',
'File resources': '文件资源',
'UDF resources': 'UDF资源',
+ 'UDF resources directory': 'UDF资源目录',
+ 'Please select UDF resources directory': '请选择UDF资源目录',
'Edit alarm group': '编辑告警组',
'Create alarm group': '创建告警组',
'Group Name': '组名称',
@@ -224,8 +230,11 @@ export default {
'execution': '执行中',
'finish': '完成',
'Create File': '创建文件',
+ 'Create folder': '创建文件夹',
'File Name': '文件名称',
+ 'Folder Name': '文件夹名称',
'File Format': '文件格式',
+ 'Folder Format': '文件夹格式',
'File Content': '文件内容',
'Upload File Size': '文件大小不能超过1G',
'Create': '创建',
@@ -523,6 +532,10 @@ export default {
'0 means unlimited by byte': 'KB,0代表不限制',
'0 means unlimited by count': '0代表不限制',
'Modify User': '修改用户',
+ 'Whether directory' : '是否文件夹',
+ 'Yes': '是',
+ 'No': '否',
+ 'Modify User': '修改用户',
'Please enter Mysql Database(required)': '请输入Mysql数据库(必填)',
'Please enter Mysql Table(required)': '请输入Mysql表名(必填)',
'Please enter Columns (Comma separated)': '请输入列名,用 , 隔开',
diff --git a/dolphinscheduler-ui/src/js/module/util/routerUtil.js b/dolphinscheduler-ui/src/js/module/util/routerUtil.js
index 7ae91f0c22..c19a8e7609 100644
--- a/dolphinscheduler-ui/src/js/module/util/routerUtil.js
+++ b/dolphinscheduler-ui/src/js/module/util/routerUtil.js
@@ -19,7 +19,7 @@ import merge from 'webpack-merge'
import router from '@/conf/home/router'
export function setUrlParams (o) {
- router.push({
- query: merge(router.history.current.query, o)
- })
+ // router.push({
+ // query: merge(router.history.current.query, o)
+ // })
}
diff --git a/dolphinscheduler-ui/src/sass/common/_mixin.scss b/dolphinscheduler-ui/src/sass/common/_mixin.scss
deleted file mode 100644
index c6a5afeef5..0000000000
--- a/dolphinscheduler-ui/src/sass/common/_mixin.scss
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/sass/common/index.scss b/dolphinscheduler-ui/src/sass/common/index.scss
index 403f3c6980..8e47b033cb 100644
--- a/dolphinscheduler-ui/src/sass/common/index.scss
+++ b/dolphinscheduler-ui/src/sass/common/index.scss
@@ -14,8 +14,6 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
-@import "mixin";
@import "animation";
@import "scrollbar";
@import "table";
From c706b21c621cca6c04f5658abfba5bc4f06415c7 Mon Sep 17 00:00:00 2001
From: liwenhe1993 <32166572+liwenhe1993@users.noreply.github.com>
Date: Sat, 28 Mar 2020 10:42:46 +0800
Subject: [PATCH 32/58] =?UTF-8?q?taskProps.getScheduleTime()=20may=20be=20?=
=?UTF-8?q?null,=20but=20there=20is=20no=20check=20if=20it=20=E2=80=A6=20(?=
=?UTF-8?q?#2256)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* taskProps.getScheduleTime() may be null, but there is no check if it is null or not
* Add unit test
---
.../server/worker/task/shell/ShellTask.java | 17 +++---
.../worker/task/shell/ShellTaskTest.java | 60 ++++++++++++++++---
2 files changed, 63 insertions(+), 14 deletions(-)
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java
index 165430b5fd..fae514c03d 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTask.java
@@ -143,13 +143,16 @@ public class ShellTask extends AbstractTask {
taskProps.getCmdTypeIfComplement(),
taskProps.getScheduleTime());
-// replace variable TIME with $[YYYYmmddd...] in shell file when history run job and batch complement job
- if(paramsMap != null && taskProps.getScheduleTime()!=null) {
- String dateTime = DateUtils.format(taskProps.getScheduleTime(), Constants.PARAMETER_FORMAT_TIME);
- Property p = new Property();
- p.setValue(dateTime);
- p.setProp(Constants.PARAMETER_SHECDULE_TIME);
- paramsMap.put(Constants.PARAMETER_SHECDULE_TIME, p);
+ // new
+ // replace variable TIME with $[YYYYmmddd...] in shell file when history run job and batch complement job
+ if (paramsMap != null) {
+ if (taskProps.getScheduleTime() != null) {
+ String dateTime = DateUtils.format(taskProps.getScheduleTime(), Constants.PARAMETER_FORMAT_TIME);
+ Property p = new Property();
+ p.setValue(dateTime);
+ p.setProp(Constants.PARAMETER_SHECDULE_TIME);
+ paramsMap.put(Constants.PARAMETER_SHECDULE_TIME, p);
+ }
script = ParameterUtils.convertParameterPlaceholders2(script, ParamUtils.convert(paramsMap));
}
diff --git a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java
index ebe90147d1..387c7c5e53 100644
--- a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java
+++ b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/shell/ShellTaskTest.java
@@ -25,13 +25,10 @@ import org.apache.dolphinscheduler.server.worker.task.ShellCommandExecutor;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
import org.apache.dolphinscheduler.service.bean.SpringApplicationContext;
import org.apache.dolphinscheduler.service.process.ProcessService;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Assume;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.*;
import org.junit.runner.RunWith;
import org.powermock.api.mockito.PowerMockito;
+import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;
import org.slf4j.Logger;
@@ -45,6 +42,7 @@ import java.util.Date;
*/
@RunWith(PowerMockRunner.class)
@PrepareForTest(OSUtils.class)
+@PowerMockIgnore({"javax.management.*"})
public class ShellTaskTest {
private static final Logger logger = LoggerFactory.getLogger(ShellTaskTest.class);
@@ -136,6 +134,28 @@ public class ShellTaskTest {
}
}
+ @Test
+ public void testInitException() {
+ TaskProps props = new TaskProps();
+ props.setTaskDir("/tmp");
+ props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
+ props.setTaskInstId(1);
+ props.setTenantCode("1");
+ props.setEnvFile(".dolphinscheduler_env.sh");
+ props.setTaskStartTime(new Date());
+ props.setTaskTimeout(0);
+ props.setTaskParams("{\"rawScript\": \"\"}");
+ ShellTask shellTask = new ShellTask(props, logger);
+ try {
+ shellTask.init();
+ } catch (Exception e) {
+ logger.info(e.getMessage(), e);
+ if (e.getMessage().contains("shell task params is not valid")) {
+ Assert.assertTrue(true);
+ }
+ }
+ }
+
/**
* Method: init for Windows
*/
@@ -157,7 +177,20 @@ public class ShellTaskTest {
public void testHandleForUnix() throws Exception {
try {
PowerMockito.when(OSUtils.isWindows()).thenReturn(false);
- shellTask.handle();
+ TaskProps props = new TaskProps();
+ props.setTaskDir("/tmp");
+ props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
+ props.setTaskInstId(1);
+ props.setTenantCode("1");
+ props.setEnvFile(".dolphinscheduler_env.sh");
+ props.setTaskStartTime(new Date());
+ props.setTaskTimeout(0);
+ props.setScheduleTime(new Date());
+ props.setCmdTypeIfComplement(CommandType.START_PROCESS);
+ props.setTaskParams("{\"rawScript\": \" echo ${test}\", \"localParams\": [{\"prop\":\"test\", \"direct\":\"IN\", \"type\":\"VARCHAR\", \"value\":\"123\"}]}");
+ ShellTask shellTask1 = new ShellTask(props, logger);
+ shellTask1.init();
+ shellTask1.handle();
Assert.assertTrue(true);
} catch (Error | Exception e) {
if (!e.getMessage().contains("process error . exitCode is : -1")
@@ -174,7 +207,20 @@ public class ShellTaskTest {
public void testHandleForWindows() throws Exception {
try {
Assume.assumeTrue(OSUtils.isWindows());
- shellTask.handle();
+ TaskProps props = new TaskProps();
+ props.setTaskDir("/tmp");
+ props.setTaskAppId(String.valueOf(System.currentTimeMillis()));
+ props.setTaskInstId(1);
+ props.setTenantCode("1");
+ props.setEnvFile(".dolphinscheduler_env.sh");
+ props.setTaskStartTime(new Date());
+ props.setTaskTimeout(0);
+ props.setScheduleTime(new Date());
+ props.setCmdTypeIfComplement(CommandType.START_PROCESS);
+ props.setTaskParams("{\"rawScript\": \" echo ${test}\", \"localParams\": [{\"prop\":\"test\", \"direct\":\"IN\", \"type\":\"VARCHAR\", \"value\":\"123\"}]}");
+ ShellTask shellTask1 = new ShellTask(props, logger);
+ shellTask1.init();
+ shellTask1.handle();
Assert.assertTrue(true);
} catch (Error | Exception e) {
if (!e.getMessage().contains("process error . exitCode is : -1")) {
From 48d7612cd50cbbb82bdc479afeafd2e7f0022da0 Mon Sep 17 00:00:00 2001
From: "gabry.wu"
Date: Sat, 28 Mar 2020 11:50:38 +0800
Subject: [PATCH 33/58] Adapting partial code(file name start with S #2) to the
sonar cloud rule (#2270)
---
.../api/enums/StatusTest.java | 2 +-
.../common/task/spark/SparkParameters.java | 10 +++++---
.../common/thread/Stopper.java | 2 +-
.../common/utils/StringTest.java | 6 -----
.../runner/SubProcessTaskExecThread.java | 6 ++---
.../server/worker/task/sql/SqlTask.java | 23 ++++---------------
.../worker/task/spark/SparkTaskTest.java | 2 +-
7 files changed, 17 insertions(+), 34 deletions(-)
diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java
index 0c9ddff791..4e31a71e9d 100644
--- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java
+++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/enums/StatusTest.java
@@ -28,7 +28,7 @@ public class StatusTest {
@Test
public void testGetCode() {
- assertEquals(Status.SUCCESS.getCode(), 0);
+ assertEquals(0, Status.SUCCESS.getCode());
assertNotEquals(Status.REQUEST_PARAMS_NOT_VALID_ERROR.getCode(), 0);
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
index dbafddfddd..74982d5af9 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
@@ -20,6 +20,7 @@ import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
@@ -222,11 +223,14 @@ public class SparkParameters extends AbstractParameters {
@Override
public List getResourceFilesList() {
if(resourceList !=null ) {
- this.resourceList.add(mainJar);
- return resourceList.stream()
+ List resourceFilesList = resourceList.stream()
.map(ResourceInfo::getRes).collect(Collectors.toList());
+ if(mainJar != null){
+ resourceFilesList.add(mainJar.getRes());
+ }
+ return resourceFilesList;
}
- return null;
+ return Collections.emptyList();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java
index cad6914cb8..7353291054 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/thread/Stopper.java
@@ -23,7 +23,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
*/
public class Stopper {
- private static volatile AtomicBoolean signal = new AtomicBoolean(false);
+ private static AtomicBoolean signal = new AtomicBoolean(false);
public static final boolean isStopped(){
return signal.get();
diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringTest.java
index 99a2cf05bc..b14be21e60 100644
--- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringTest.java
+++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/utils/StringTest.java
@@ -24,12 +24,6 @@ import java.util.List;
public class StringTest {
-
- @Test
- public void test1(){
- System.out.println(String.format("%s_%010d_%010d", String.valueOf(1), Long.valueOf(3), Integer.valueOf(4)));
- }
-
@Test
public void stringCompareTest(){
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java
index fc16b5112b..13a59505bc 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/master/runner/SubProcessTaskExecThread.java
@@ -95,7 +95,7 @@ public class SubProcessTaskExecThread extends MasterBaseTaskExecThread {
* set task instance state
* @return
*/
- private Boolean setTaskInstanceState(){
+ private boolean setTaskInstanceState(){
subProcessInstance = processService.findSubProcessInstance(processInstance.getId(), taskInstance.getId());
if(subProcessInstance == null || taskInstance.getState().typeIsFinished()){
return false;
@@ -131,8 +131,8 @@ public class SubProcessTaskExecThread extends MasterBaseTaskExecThread {
if (taskInstance.getState().typeIsFinished()) {
logger.info("sub work flow task {} already complete. task state:{}, parent work flow instance state:{}",
this.taskInstance.getName(),
- this.taskInstance.getState().toString(),
- this.processInstance.getState().toString());
+ this.taskInstance.getState(),
+ this.processInstance.getState());
return;
}
while (Stopper.isRunning()) {
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
index ebc91563b4..ab314b6f8e 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
@@ -378,7 +378,7 @@ public class SqlTask extends AbstractTask {
List users = alertDao.queryUserByAlertGroupId(instance.getWarningGroupId());
// receiving group list
- List receviersList = new ArrayList();
+ List receviersList = new ArrayList<>();
for(User user:users){
receviersList.add(user.getEmail().trim());
}
@@ -392,7 +392,7 @@ public class SqlTask extends AbstractTask {
}
// copy list
- List receviersCcList = new ArrayList();
+ List receviersCcList = new ArrayList<>();
// Custom Copier
String receiversCc = sqlParameters.getReceiversCc();
if (StringUtils.isNotEmpty(receiversCc)){
@@ -406,7 +406,7 @@ public class SqlTask extends AbstractTask {
if(EnumUtils.isValidEnum(ShowType.class,showTypeName)){
Map mailResult = MailUtils.sendMails(receviersList,
receviersCcList, title, content, ShowType.valueOf(showTypeName));
- if(!(Boolean) mailResult.get(STATUS)){
+ if(!(boolean) mailResult.get(STATUS)){
throw new RuntimeException("send mail failed!");
}
}else{
@@ -463,22 +463,7 @@ public class SqlTask extends AbstractTask {
ProcessInstance processInstance = processService.findProcessInstanceByTaskId(taskProps.getTaskInstId());
int userId = processInstance.getExecutorId();
- PermissionCheck permissionCheckUdf = new PermissionCheck(AuthorizationType.UDF, processService,udfFunIds,userId,logger);
+ PermissionCheck permissionCheckUdf = new PermissionCheck<>(AuthorizationType.UDF, processService,udfFunIds,userId,logger);
permissionCheckUdf.checkPermission();
}
-
- /**
- * check data source permission
- * @param dataSourceId data source id
- * @return if has download permission return true else false
- */
- private void checkDataSourcePermission(int dataSourceId) throws Exception{
- // process instance
- ProcessInstance processInstance = processService.findProcessInstanceByTaskId(taskProps.getTaskInstId());
- int userId = processInstance.getExecutorId();
-
- PermissionCheck permissionCheckDataSource = new PermissionCheck(AuthorizationType.DATASOURCE, processService,new Integer[]{dataSourceId},userId,logger);
- permissionCheckDataSource.checkPermission();
- }
-
}
diff --git a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTaskTest.java b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTaskTest.java
index a18e0b2a9d..f0bcd9ec27 100644
--- a/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTaskTest.java
+++ b/dolphinscheduler-server/src/test/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTaskTest.java
@@ -135,7 +135,7 @@ public class SparkTaskTest {
logger.info("spark task command : {}", sparkArgs);
- Assert.assertEquals(sparkArgs.split(" ")[0], SPARK2_COMMAND );
+ Assert.assertEquals(SPARK2_COMMAND, sparkArgs.split(" ")[0]);
}
}
From 38c965ddc990b68b9f3d4948fa5091af58a49a4b Mon Sep 17 00:00:00 2001
From: lgcareer <18610854716@163.com>
Date: Sat, 28 Mar 2020 15:43:07 +0800
Subject: [PATCH 34/58] new feature for #404 add resource tree function (#2323)
* add create resource directory
* add create resource directory
* update the resource test
* add upgrade sql in version 1.2.2
* Adding request parameter id to update queryResourceListPaging
* set isDirectory value is false default
* add full name to update updateResource
* remove request parameter isDirectory to update createResource method
* update queryResourceListPaging with change get to post
* update updateResource method with remove fullName
* File management list modification (#1976)
* add resource component
* add resource tree visitor
* return json string
* update queryResourceList
* upload file need fullName
* add method rootNode
* Shell task resources and authorization resources (#1989)
* File management list modification
* Shell task resources and authorization resources
* download resource when execute task
* download resource when execute task
* update authorization type
* download resource when execute task
* Spark task resource changes (#1990)
* File management list modification
* Shell task resources and authorization resources
* Spark task resource changes
* download resource when execute task
* update udf function service
* add resource type in ResourceComponent
* UDF resource tree and change DAG style (#2019)
* File management list modification
* Shell task resources and authorization resources
* Spark task resource changes
* UDF resource tree and change DAG style
* add deleteIds method in ResourceMapper and ResourceMapperTest
* Add comments on class and method
* add queryResourceByName method in controller
* update verify-name with change name to full name
* update queryResource with add parameter pid
* update queryResource with add parameter pid
* add resource ids in process definition and delete resource need judge whether it is used by any process definition
* Breadcrumb development (#2033)
* File management list modification
* Shell task resources and authorization resources
* Spark task resource changes
* UDF resource tree and change DAG style
* Breadcrumb development
* Breadcrumb development
* Resource tree bug fix (#2040)
* File management list modification
* Shell task resources and authorization resources
* Spark task resource changes
* UDF resource tree and change DAG style
* Breadcrumb development
* Breadcrumb development
* Resource tree bug fix
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* Fix github action rerun failed (#2067)
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* Change crumb position
* Change crumb position (#2068)
* build resource process definition map
* UDF changed to multiple choice
* UDF changed to multiple choice (#2077)
* Change crumb position
* UDF changed to multiple choice
* build resource process definition map (#2076)
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* build resource process definition map
* update resource name also need update all the children full name
* need add queryResource
* update resource name also need update all the children full name (#2096)
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* build resource process definition map
* update resource name also need update all the children full name
* need add queryResource
* Limit customization file content to no more than 3000 lines
* Limit customization file content to no more than 3000 lines(#2128)
* Limit customization file content to no more than 3000 lines(#2128) (#2140)
* Change crumb position
* UDF changed to multiple choice
* Limit customization file content to no more than 3000 lines
* Limit customization file content to no more than 3000 lines(#2128)
* add queryResourceJarList
* add queryResourceJarList
* add queryResourceJarList
* add queryResourceJarList (#2192)
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* build resource process definition map
* update resource name also need update all the children full name
* need add queryResource
* add queryResourceJarList
* add queryResourceJarList
* add queryResourceJarList
* Modify the main jar package
* Modify the main jar package (#2200)
* Change crumb position
* UDF changed to multiple choice
* Limit customization file content to no more than 3000 lines
* Limit customization file content to no more than 3000 lines(#2128)
* Modify the main jar package
* add resource filter in order to get filtered resource
* add comments of resource filter
* update list children by resource
* choose main jar with resource tree (#2220)
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* build resource process definition map
* update resource name also need update all the children full name
* need add queryResource
* add queryResourceJarList
* add queryResourceJarList
* add queryResourceJarList
* add resource filter in order to get filtered resource
* add comments of resource filter
* update list children by resource
* Return null if query resource list is empty
* update queryResource method change parameter pid to id
* getResourceId
* Delete checksum and modify parameter name
* revert .env
* remove parameter pid
* Delete request interface
* go back to the last page
* jar interface call
* Fix issue #2234 and #2228
* change resource name with full name
* Fix issue #2234 and #2228 (#2246)
* update resource service test
* Fix github action rerun failed
* add status of PARENT_RESOURCE_NOT_EXIST
* build resource process definition map
* update resource name also need update all the children full name
* need add queryResource
* add queryResourceJarList
* add queryResourceJarList
* add queryResourceJarList
* add resource filter in order to get filtered resource
* add comments of resource filter
* update list children by resource
* Return null if query resource list is empty
* update queryResource method change parameter pid to id
* revert .env
* remove parameter pid
* Fix issue #2234 and #2228
* change resource name with full name
* Fix list query value error
* remove unauth-file with authorize-resource-tree
* Repair data cannot be echoed
* Repair data cannot be echoed
* execute mr and spark task need query resource name before
* Authorized resource interface replacement
* Authorized resource interface replacement
* Filter UDF resources
* Change parameters
* need query all authorized directory children when create task
* Change normalize.scss import method and animation.scss license modification
* Delete file list update processing
* It's fixed that resource not deleted in hdfs when delete it.
* add tooltips
* add tooltips (#2310)
* Echo workflow name and modify udf management name
* [new feature]add resource tree function
* revert front code in order to be same as dev branch
* revert front code in order to be same as dev branch
* revert common.properties and application.properties
* add super method
* update flink parameter test
* update flink parameter and unit test
* update resource service test
* If resource list is empty,need init it
* update flink parameter test
Co-authored-by: break60 <790061044@qq.com>
Co-authored-by: xingchun-chen <55787491+xingchun-chen@users.noreply.github.com>
Co-authored-by: qiaozhanwei
---
.../api/controller/ResourcesController.java | 152 ++++-
.../api/dto/resources/Directory.java | 29 +
.../api/dto/resources/FileLeaf.java | 24 +
.../api/dto/resources/ResourceComponent.java | 193 ++++++
.../api/dto/resources/filter/IFilter.java | 28 +
.../dto/resources/filter/ResourceFilter.java | 100 ++++
.../visitor/ResourceTreeVisitor.java | 130 ++++
.../api/dto/resources/visitor/Visitor.java | 31 +
.../dolphinscheduler/api/enums/Status.java | 12 +-
.../api/service/ProcessDefinitionService.java | 34 +-
.../api/service/ResourcesService.java | 559 ++++++++++++++----
.../api/service/UdfFuncService.java | 4 +-
.../resources/filter/ResourceFilterTest.java | 58 ++
.../visitor/ResourceTreeVisitorTest.java | 82 +++
.../api/service/ResourcesServiceTest.java | 112 ++--
.../api/utils/CheckUtilsTest.java | 7 +-
.../common/enums/AuthorizationType.java | 12 +-
.../common/process/ResourceInfo.java | 10 +
.../common/task/AbstractParameters.java | 3 +-
.../common/task/IParameters.java | 4 +-
.../task/conditions/ConditionsParameters.java | 3 +-
.../common/task/datax/DataxParameters.java | 3 +-
.../task/dependent/DependentParameters.java | 3 +-
.../common/task/flink/FlinkParameters.java | 29 +-
.../common/task/http/HttpParameters.java | 3 +-
.../common/task/mr/MapreduceParameters.java | 20 +-
.../task/procedure/ProcedureParameters.java | 3 +-
.../common/task/python/PythonParameters.java | 10 +-
.../common/task/shell/ShellParameters.java | 9 +-
.../common/task/spark/SparkParameters.java | 20 +-
.../common/task/sql/SqlParameters.java | 3 +-
.../common/task/sqoop/SqoopParameters.java | 3 +-
.../task/subprocess/SubProcessParameters.java | 3 +-
.../common/utils/HadoopUtils.java | 49 +-
.../common/task/FlinkParametersTest.java | 14 +-
.../dao/entity/ProcessDefinition.java | 15 +
.../dolphinscheduler/dao/entity/Resource.java | 68 ++-
.../dao/mapper/ProcessDefinitionMapper.java | 11 +-
.../dao/mapper/ResourceMapper.java | 53 +-
.../dao/mapper/UdfFuncMapper.java | 15 +
.../dao/mapper/ProcessDefinitionMapper.xml | 7 +
.../dao/mapper/ResourceMapper.xml | 79 ++-
.../dao/mapper/UdfFuncMapper.xml | 24 +
.../dao/mapper/ResourceMapperTest.java | 57 +-
.../worker/runner/TaskScheduleThread.java | 50 +-
.../server/worker/task/AbstractYarnTask.java | 5 +
.../server/worker/task/flink/FlinkTask.java | 25 +
.../server/worker/task/mr/MapReduceTask.java | 26 +-
.../server/worker/task/spark/SparkTask.java | 26 +-
.../server/worker/task/sqoop/SqoopTask.java | 4 +
.../service/permission/PermissionCheck.java | 31 +
.../service/process/ProcessService.java | 26 +-
.../pages/dag/_source/plugIn/jsPlumbHandle.js | 2 +-
.../pages/file/pages/_source/common.js | 0
.../pages/file/pages/list/_source/list.vue | 0
.../pages/file/pages/list/_source/rename.vue | 2 +-
.../resource/pages/file/pages/list/index.vue | 0
.../js/conf/home/store/resource/actions.js | 0
.../components/fileUpdate/fileUpdate.vue | 0
sql/soft_version | 2 +-
.../mysql/dolphinscheduler_ddl.sql | 82 ++-
.../postgresql/dolphinscheduler_ddl.sql | 85 ++-
.../postgresql/dolphinscheduler_dml.sql | 4 +-
63 files changed, 2127 insertions(+), 331 deletions(-)
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java
create mode 100644 dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java
create mode 100644 dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
create mode 100644 dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java
mode change 100644 => 100755 dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/_source/common.js
mode change 100644 => 100755 dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue
mode change 100644 => 100755 dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue
mode change 100644 => 100755 dolphinscheduler-ui/src/js/conf/home/store/resource/actions.js
mode change 100644 => 100755 dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
index f28ba100f2..40effb641d 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/ResourcesController.java
@@ -60,6 +60,50 @@ public class ResourcesController extends BaseController{
@Autowired
private UdfFuncService udfFuncService;
+ /**
+ * create resource
+ *
+ * @param loginUser login user
+ * @param alias alias
+ * @param description description
+ * @param type type
+ * @return create result code
+ */
+
+ /**
+ *
+ * @param loginUser login user
+ * @param type type
+ * @param alias alias
+ * @param description description
+ * @param pid parent id
+ * @param currentDir current directory
+ * @return
+ */
+ @ApiOperation(value = "createDirctory", notes= "CREATE_RESOURCE_NOTES")
+ @ApiImplicitParams({
+ @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
+ @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String"),
+ @ApiImplicitParam(name = "description", value = "RESOURCE_DESC", dataType ="String"),
+ @ApiImplicitParam(name = "file", value = "RESOURCE_FILE", required = true, dataType = "MultipartFile")
+ })
+ @PostMapping(value = "/directory/create")
+ public Result createDirectory(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+ @RequestParam(value = "type") ResourceType type,
+ @RequestParam(value ="name") String alias,
+ @RequestParam(value = "description", required = false) String description,
+ @RequestParam(value ="pid") int pid,
+ @RequestParam(value ="currentDir") String currentDir) {
+ try {
+ logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
+ loginUser.getUserName(),type, alias, description,pid,currentDir);
+ return resourceService.createDirectory(loginUser,alias, description,type ,pid,currentDir);
+ } catch (Exception e) {
+ logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
+ return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
+ }
+ }
+
/**
* create resource
*
@@ -80,13 +124,15 @@ public class ResourcesController extends BaseController{
@PostMapping(value = "/create")
public Result createResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value = "type") ResourceType type,
- @RequestParam(value ="name")String alias,
+ @RequestParam(value ="name") String alias,
@RequestParam(value = "description", required = false) String description,
- @RequestParam("file") MultipartFile file) {
+ @RequestParam("file") MultipartFile file,
+ @RequestParam(value ="pid") int pid,
+ @RequestParam(value ="currentDir") String currentDir) {
try {
logger.info("login user {}, create resource, type: {}, resource alias: {}, desc: {}, file: {},{}",
loginUser.getUserName(),type, alias, description, file.getName(), file.getOriginalFilename());
- return resourceService.createResource(loginUser,alias, description,type ,file);
+ return resourceService.createResource(loginUser,alias, description,type ,file,pid,currentDir);
} catch (Exception e) {
logger.error(CREATE_RESOURCE_ERROR.getMsg(),e);
return error(CREATE_RESOURCE_ERROR.getCode(), CREATE_RESOURCE_ERROR.getMsg());
@@ -120,7 +166,7 @@ public class ResourcesController extends BaseController{
try {
logger.info("login user {}, update resource, type: {}, resource alias: {}, desc: {}",
loginUser.getUserName(),type, alias, description);
- return resourceService.updateResource(loginUser,resourceId,alias, description,type);
+ return resourceService.updateResource(loginUser,resourceId,alias,description,type);
} catch (Exception e) {
logger.error(UPDATE_RESOURCE_ERROR.getMsg(),e);
return error(Status.UPDATE_RESOURCE_ERROR.getCode(), Status.UPDATE_RESOURCE_ERROR.getMsg());
@@ -166,6 +212,7 @@ public class ResourcesController extends BaseController{
@ApiOperation(value = "queryResourceListPaging", notes= "QUERY_RESOURCE_LIST_PAGING_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
+ @ApiImplicitParam(name = "id", value = "RESOURCE_ID", required = true, dataType ="int"),
@ApiImplicitParam(name = "searchVal", value = "SEARCH_VAL", dataType ="String"),
@ApiImplicitParam(name = "pageNo", value = "PAGE_NO", dataType = "Int", example = "1"),
@ApiImplicitParam(name = "pageSize", value = "PAGE_SIZE", dataType ="Int",example = "20")
@@ -174,6 +221,7 @@ public class ResourcesController extends BaseController{
@ResponseStatus(HttpStatus.OK)
public Result queryResourceListPaging(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam(value ="type") ResourceType type,
+ @RequestParam(value ="id") int id,
@RequestParam("pageNo") Integer pageNo,
@RequestParam(value = "searchVal", required = false) String searchVal,
@RequestParam("pageSize") Integer pageSize
@@ -187,7 +235,7 @@ public class ResourcesController extends BaseController{
}
searchVal = ParameterUtils.handleEscapes(searchVal);
- result = resourceService.queryResourceListPaging(loginUser,type,searchVal,pageNo, pageSize);
+ result = resourceService.queryResourceListPaging(loginUser,id,type,searchVal,pageNo, pageSize);
return returnDataListPaging(result);
}catch (Exception e){
logger.error(QUERY_RESOURCES_LIST_PAGING.getMsg(),e);
@@ -227,32 +275,89 @@ public class ResourcesController extends BaseController{
* verify resource by alias and type
*
* @param loginUser login user
- * @param alias resource name
- * @param type resource type
+ * @param fullName resource full name
+ * @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
@ApiOperation(value = "verifyResourceName", notes= "VERIFY_RESOURCE_NAME_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
- @ApiImplicitParam(name = "name", value = "RESOURCE_NAME", required = true, dataType ="String")
+ @ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType ="String")
})
@GetMapping(value = "/verify-name")
@ResponseStatus(HttpStatus.OK)
public Result verifyResourceName(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
- @RequestParam(value ="name") String alias,
+ @RequestParam(value ="fullName") String fullName,
@RequestParam(value ="type") ResourceType type
) {
try {
logger.info("login user {}, verfiy resource alias: {},resource type: {}",
- loginUser.getUserName(), alias,type);
+ loginUser.getUserName(), fullName,type);
- return resourceService.verifyResourceName(alias,type,loginUser);
+ return resourceService.verifyResourceName(fullName,type,loginUser);
} catch (Exception e) {
logger.error(VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg(), e);
return error(Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getCode(), Status.VERIFY_RESOURCE_BY_NAME_AND_TYPE_ERROR.getMsg());
}
}
+ /**
+ * query resources jar list
+ *
+ * @param loginUser login user
+ * @param type resource type
+ * @return resource list
+ */
+ @ApiOperation(value = "queryResourceJarList", notes= "QUERY_RESOURCE_LIST_NOTES")
+ @ApiImplicitParams({
+ @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType")
+ })
+ @GetMapping(value="/list/jar")
+ @ResponseStatus(HttpStatus.OK)
+ public Result queryResourceJarList(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+ @RequestParam(value ="type") ResourceType type
+ ){
+ try{
+ logger.info("query resource list, login user:{}, resource type:{}", loginUser.getUserName(), type.toString());
+ Map result = resourceService.queryResourceJarList(loginUser, type);
+ return returnDataList(result);
+ }catch (Exception e){
+ logger.error(QUERY_RESOURCES_LIST_ERROR.getMsg(),e);
+ return error(Status.QUERY_RESOURCES_LIST_ERROR.getCode(), Status.QUERY_RESOURCES_LIST_ERROR.getMsg());
+ }
+ }
+
+ /**
+ * query resource by full name and type
+ *
+ * @param loginUser login user
+ * @param fullName resource full name
+ * @param type resource type
+ * @return true if the resource name not exists, otherwise return false
+ */
+ @ApiOperation(value = "queryResource", notes= "QUERY_BY_RESOURCE_NAME")
+ @ApiImplicitParams({
+ @ApiImplicitParam(name = "type", value = "RESOURCE_TYPE", required = true, dataType ="ResourceType"),
+ @ApiImplicitParam(name = "fullName", value = "RESOURCE_FULL_NAME", required = true, dataType ="String")
+ })
+ @GetMapping(value = "/queryResource")
+ @ResponseStatus(HttpStatus.OK)
+ public Result queryResource(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+ @RequestParam(value ="fullName",required = false) String fullName,
+ @RequestParam(value ="id",required = false) Integer id,
+ @RequestParam(value ="type") ResourceType type
+ ) {
+ try {
+ logger.info("login user {}, query resource by full name: {} or id: {},resource type: {}",
+ loginUser.getUserName(), fullName,id,type);
+
+ return resourceService.queryResource(fullName,id,type);
+ } catch (Exception e) {
+ logger.error(RESOURCE_NOT_EXIST.getMsg(), e);
+ return error(Status.RESOURCE_NOT_EXIST.getCode(), Status.RESOURCE_NOT_EXIST.getMsg());
+ }
+ }
+
/**
* view resource file online
*
@@ -310,16 +415,18 @@ public class ResourcesController extends BaseController{
@RequestParam(value ="fileName")String fileName,
@RequestParam(value ="suffix")String fileSuffix,
@RequestParam(value = "description", required = false) String description,
- @RequestParam(value = "content") String content
+ @RequestParam(value = "content") String content,
+ @RequestParam(value ="pid") int pid,
+ @RequestParam(value ="currentDir") String currentDir
) {
try{
logger.info("login user {}, online create resource! fileName : {}, type : {}, suffix : {},desc : {},content : {}",
- loginUser.getUserName(),fileName,type,fileSuffix,description,content);
+ loginUser.getUserName(),fileName,type,fileSuffix,description,content,pid,currentDir);
if(StringUtils.isEmpty(content)){
logger.error("resource file contents are not allowed to be empty");
return error(Status.RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg());
}
- return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content);
+ return resourceService.onlineCreateResource(loginUser,type,fileName,fileSuffix,description,content,pid,currentDir);
}catch (Exception e){
logger.error(CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg(),e);
return error(Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getCode(), Status.CREATE_RESOURCE_FILE_ON_LINE_ERROR.getMsg());
@@ -384,6 +491,9 @@ public class ResourcesController extends BaseController{
.ok()
.header(HttpHeaders.CONTENT_DISPOSITION, "attachment; filename=\"" + file.getFilename() + "\"")
.body(file);
+ }catch (RuntimeException e){
+ logger.error(e.getMessage(),e);
+ return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(e.getMessage());
}catch (Exception e){
logger.error(DOWNLOAD_RESOURCE_FILE_ERROR.getMsg(),e);
return ResponseEntity.status(HttpStatus.BAD_REQUEST).body(Status.DOWNLOAD_RESOURCE_FILE_ERROR.getMsg());
@@ -658,21 +768,21 @@ public class ResourcesController extends BaseController{
* @param userId user id
* @return unauthorized result code
*/
- @ApiOperation(value = "unauthorizedFile", notes= "UNAUTHORIZED_FILE_NOTES")
+ @ApiOperation(value = "authorizeResourceTree", notes= "AUTHORIZE_RESOURCE_TREE_NOTES")
@ApiImplicitParams({
@ApiImplicitParam(name = "userId", value = "USER_ID", required = true, dataType ="Int", example = "100")
})
- @GetMapping(value = "/unauth-file")
+ @GetMapping(value = "/authorize-resource-tree")
@ResponseStatus(HttpStatus.CREATED)
- public Result unauthorizedFile(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
+ public Result authorizeResourceTree(@ApiIgnore @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
@RequestParam("userId") Integer userId) {
try{
- logger.info("resource unauthorized file, user:{}, unauthorized user id:{}", loginUser.getUserName(), userId);
- Map result = resourceService.unauthorizedFile(loginUser, userId);
+ logger.info("all resource file, user:{}, user id:{}", loginUser.getUserName(), userId);
+ Map result = resourceService.authorizeResourceTree(loginUser, userId);
return returnDataList(result);
}catch (Exception e){
- logger.error(UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg(),e);
- return error(Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getCode(), Status.UNAUTHORIZED_FILE_RESOURCE_ERROR.getMsg());
+ logger.error(AUTHORIZE_RESOURCE_TREE.getMsg(),e);
+ return error(Status.AUTHORIZE_RESOURCE_TREE.getCode(), Status.AUTHORIZE_RESOURCE_TREE.getMsg());
}
}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java
new file mode 100644
index 0000000000..289d5060bf
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/Directory.java
@@ -0,0 +1,29 @@
+package org.apache.dolphinscheduler.api.dto.resources;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * directory
+ */
+public class Directory extends ResourceComponent{
+
+ @Override
+ public boolean isDirctory() {
+ return true;
+ }
+
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java
new file mode 100644
index 0000000000..b9b91821f4
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/FileLeaf.java
@@ -0,0 +1,24 @@
+package org.apache.dolphinscheduler.api.dto.resources;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * file leaf
+ */
+public class FileLeaf extends ResourceComponent{
+
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java
new file mode 100644
index 0000000000..fb0da702b3
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/ResourceComponent.java
@@ -0,0 +1,193 @@
+package org.apache.dolphinscheduler.api.dto.resources;
+
+import com.alibaba.fastjson.annotation.JSONField;
+import com.alibaba.fastjson.annotation.JSONType;
+import org.apache.dolphinscheduler.common.enums.ResourceType;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * resource component
+ */
+@JSONType(orders={"id","pid","name","fullName","description","isDirctory","children","type"})
+public abstract class ResourceComponent {
+ public ResourceComponent() {
+ }
+
+ public ResourceComponent(int id, int pid, String name, String fullName, String description, boolean isDirctory) {
+ this.id = id;
+ this.pid = pid;
+ this.name = name;
+ this.fullName = fullName;
+ this.description = description;
+ this.isDirctory = isDirctory;
+ int directoryFlag = isDirctory ? 1:0;
+ this.idValue = String.format("%s_%s",id,directoryFlag);
+ }
+
+
+ /**
+ * id
+ */
+ @JSONField(ordinal = 1)
+ protected int id;
+ /**
+ * parent id
+ */
+ @JSONField(ordinal = 2)
+ protected int pid;
+ /**
+ * name
+ */
+ @JSONField(ordinal = 3)
+ protected String name;
+ /**
+ * current directory (not serialized: no @JSONField and absent from @JSONType orders)
+ */
+ protected String currentDir;
+ /**
+ * full name
+ */
+ @JSONField(ordinal = 4)
+ protected String fullName;
+ /**
+ * description
+ */
+ @JSONField(ordinal = 5)
+ protected String description;
+ /**
+ * is directory
+ */
+ @JSONField(ordinal = 6)
+ protected boolean isDirctory;
+ /**
+ * id value
+ */
+ @JSONField(ordinal = 7)
+ protected String idValue;
+ /**
+ * resource type
+ */
+ @JSONField(ordinal = 8)
+ protected ResourceType type;
+ /**
+ * children
+ */
+ @JSONField(ordinal = 9)
+ protected List children = new ArrayList<>();
+
+ /**
+ * add resource component
+ * @param resourceComponent resource component
+ */
+ public void add(ResourceComponent resourceComponent){
+ children.add(resourceComponent);
+ }
+
+ public String getName(){
+ return this.name;
+ }
+
+ public String getDescription(){
+ return this.description;
+ }
+
+ public int getId() {
+ return id;
+ }
+
+ public void setId(int id) {
+ this.id = id;
+ }
+
+ public int getPid() {
+ return pid;
+ }
+
+ public void setPid(int pid) {
+ this.pid = pid;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getFullName() {
+ return fullName;
+ }
+
+ public void setFullName(String fullName) {
+ this.fullName = fullName;
+ }
+
+ public void setDescription(String description) {
+ this.description = description;
+ }
+
+ public boolean isDirctory() {
+ return isDirctory;
+ }
+
+ public void setDirctory(boolean dirctory) {
+ isDirctory = dirctory;
+ }
+
+ public String getIdValue() {
+ return idValue;
+ }
+
+ public void setIdValue(int id,boolean isDirctory) {
+ int directoryFlag = isDirctory ? 1:0;
+ this.idValue = String.format("%s_%s",id,directoryFlag);
+ }
+
+ public ResourceType getType() {
+ return type;
+ }
+
+ public void setType(ResourceType type) {
+ this.type = type;
+ }
+
+ public List getChildren() {
+ return children;
+ }
+
+ public void setChildren(List children) {
+ this.children = children;
+ }
+
+ @Override
+ public String toString() {
+ return "ResourceComponent{" +
+ "id=" + id +
+ ", pid=" + pid +
+ ", name='" + name + '\'' +
+ ", currentDir='" + currentDir + '\'' +
+ ", fullName='" + fullName + '\'' +
+ ", description='" + description + '\'' +
+ ", isDirctory=" + isDirctory +
+ ", idValue='" + idValue + '\'' +
+ ", type=" + type +
+ ", children=" + children +
+ '}';
+ }
+
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java
new file mode 100644
index 0000000000..ce6ce3a011
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/IFilter.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.dolphinscheduler.api.dto.resources.filter;
+
+import org.apache.dolphinscheduler.dao.entity.Resource;
+
+import java.util.List;
+
+/**
+ * interface filter
+ */
+public interface IFilter {
+ List filter();
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
new file mode 100644
index 0000000000..c918a160af
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilter.java
@@ -0,0 +1,100 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.dolphinscheduler.api.dto.resources.filter;
+
+import org.apache.dolphinscheduler.dao.entity.Resource;
+
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * resource filter
+ */
+public class ResourceFilter implements IFilter {
+ /**
+ * resource suffix
+ */
+ private String suffix;
+ /**
+ * resource list
+ */
+ private List resourceList;
+
+ /**
+ * parent list
+ */
+ //Set parentList = new HashSet<>();
+
+ /**
+ * constructor
+ * @param suffix resource suffix
+ * @param resourceList resource list
+ */
+ public ResourceFilter(String suffix, List resourceList) {
+ this.suffix = suffix;
+ this.resourceList = resourceList;
+ }
+
+ /**
+ * file filter
+ * @return file filtered by suffix
+ */
+ public Set fileFilter(){
+ Set resources = resourceList.stream().filter(t -> {
+ String alias = t.getAlias();
+ return alias.endsWith(suffix);
+ }).collect(Collectors.toSet());
+ return resources;
+ }
+
+ /**
+ * list all parent dir
+ * @return parent resource dir set
+ */
+ Set listAllParent(){
+ Set parentList = new HashSet<>();
+ Set filterFileList = fileFilter();
+ for(Resource file:filterFileList){
+ parentList.add(file);
+ setAllParent(file,parentList);
+ }
+ return parentList;
+
+ }
+
+ /**
+ * recursively add all parent dirs of the given resource into parentList
+ * @param resource resource
+ * @param parentList set collecting parent resource dirs
+ */
+ private void setAllParent(Resource resource,Set parentList){
+ for (Resource resourceTemp : resourceList) {
+ if (resourceTemp.getId() == resource.getPid()) {
+ parentList.add(resourceTemp);
+ setAllParent(resourceTemp,parentList);
+ }
+ }
+ }
+
+ @Override
+ public List filter() {
+ return new ArrayList<>(listAllParent());
+ }
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java
new file mode 100644
index 0000000000..5cf118800a
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitor.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.dolphinscheduler.api.dto.resources.visitor;
+
+
+import org.apache.dolphinscheduler.api.dto.resources.Directory;
+import org.apache.dolphinscheduler.api.dto.resources.FileLeaf;
+import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
+import org.apache.dolphinscheduler.dao.entity.Resource;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * resource tree visitor
+ */
+public class ResourceTreeVisitor implements Visitor{
+
+ /**
+ * resource list
+ */
+ private List resourceList;
+
+ public ResourceTreeVisitor() {
+ }
+
+ /**
+ * constructor
+ * @param resourceList resource list
+ */
+ public ResourceTreeVisitor(List resourceList) {
+ this.resourceList = resourceList;
+ }
+
+ /**
+ * visit
+ * @return resource component
+ */
+ public ResourceComponent visit() {
+ ResourceComponent rootDirectory = new Directory();
+ for (Resource resource : resourceList) {
+ // judge whether is root node
+ if (rootNode(resource)){
+ ResourceComponent tempResourceComponent = getResourceComponent(resource);
+ rootDirectory.add(tempResourceComponent);
+ tempResourceComponent.setChildren(setChildren(tempResourceComponent.getId(),resourceList));
+ }
+ }
+ return rootDirectory;
+ }
+
+ /**
+ * set children
+ * @param id id
+ * @param list resource list
+ * @return resource component list
+ */
+ public static List setChildren(int id, List list ){
+ List childList = new ArrayList<>();
+ for (Resource resource : list) {
+ if (id == resource.getPid()){
+ ResourceComponent tempResourceComponent = getResourceComponent(resource);
+ childList.add(tempResourceComponent);
+ }
+ }
+ for (ResourceComponent resourceComponent : childList) {
+ resourceComponent.setChildren(setChildren(resourceComponent.getId(),list));
+ }
+ if (childList.size()==0){
+ return new ArrayList<>();
+ }
+ return childList;
+ }
+
+ /**
+ * Determine whether it is the root node
+ * @param resource resource
+ * @return true if it is the root node
+ */
+ public boolean rootNode(Resource resource) {
+
+ boolean isRootNode = true;
+ if(resource.getPid() != -1 ){
+ for (Resource parent : resourceList) {
+ if (resource.getPid() == parent.getId()) {
+ isRootNode = false;
+ break;
+ }
+ }
+ }
+ return isRootNode;
+ }
+
+ /**
+ * get resource component by resource
+ * @param resource resource
+ * @return resource component
+ */
+ private static ResourceComponent getResourceComponent(Resource resource) {
+ ResourceComponent tempResourceComponent;
+ if(resource.isDirectory()){
+ tempResourceComponent = new Directory();
+ }else{
+ tempResourceComponent = new FileLeaf();
+ }
+ tempResourceComponent.setName(resource.getAlias());
+ tempResourceComponent.setFullName(resource.getFullName().replaceFirst("/",""));
+ tempResourceComponent.setId(resource.getId());
+ tempResourceComponent.setPid(resource.getPid());
+ tempResourceComponent.setIdValue(resource.getId(),resource.isDirectory());
+ tempResourceComponent.setDescription(resource.getDescription());
+ tempResourceComponent.setType(resource.getType());
+ return tempResourceComponent;
+ }
+
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java
new file mode 100644
index 0000000000..3dfce7c7c1
--- /dev/null
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/dto/resources/visitor/Visitor.java
@@ -0,0 +1,31 @@
+package org.apache.dolphinscheduler.api.dto.resources.visitor;
+
+
+import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/**
+ * Visitor
+ */
+public interface Visitor {
+ /**
+ * visit
+ * @return resource component
+ */
+ ResourceComponent visit();
+}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
index 9955463f8e..416dc0ef54 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/enums/Status.java
@@ -97,7 +97,7 @@ public enum Status {
VERIFY_UDF_FUNCTION_NAME_ERROR( 10070,"verify udf function name error", "UDF函数名称验证错误"),
DELETE_UDF_FUNCTION_ERROR( 10071,"delete udf function error", "删除UDF函数错误"),
AUTHORIZED_FILE_RESOURCE_ERROR( 10072,"authorized file resource error", "授权资源文件错误"),
- UNAUTHORIZED_FILE_RESOURCE_ERROR( 10073,"unauthorized file resource error", "查询未授权资源错误"),
+ AUTHORIZE_RESOURCE_TREE( 10073,"authorize resource tree display error","授权资源目录树错误"),
UNAUTHORIZED_UDF_FUNCTION_ERROR( 10074,"unauthorized udf function error", "查询未授权UDF函数错误"),
AUTHORIZED_UDF_FUNCTION_ERROR(10075,"authorized udf function error", "授权UDF函数错误"),
CREATE_SCHEDULE_ERROR(10076,"create schedule error", "创建调度配置错误"),
@@ -184,10 +184,12 @@ public enum Status {
RESOURCE_SIZE_EXCEED_LIMIT(20007, "upload resource file size exceeds limit", "上传资源文件大小超过限制"),
RESOURCE_SUFFIX_FORBID_CHANGE(20008, "resource suffix not allowed to be modified", "资源文件后缀不支持修改"),
UDF_RESOURCE_SUFFIX_NOT_JAR(20009, "UDF resource suffix name must be jar", "UDF资源文件后缀名只支持[jar]"),
- HDFS_COPY_FAIL(20009, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
- RESOURCE_FILE_EXIST(20010, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
- RESOURCE_FILE_NOT_EXIST(20011, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
-
+ HDFS_COPY_FAIL(20010, "hdfs copy {0} -> {1} fail", "hdfs复制失败:[{0}] -> [{1}]"),
+ RESOURCE_FILE_EXIST(20011, "resource file {0} already exists in hdfs,please delete it or change name!", "资源文件[{0}]在hdfs中已存在,请删除或修改资源名"),
+ RESOURCE_FILE_NOT_EXIST(20012, "resource file {0} not exists in hdfs!", "资源文件[{0}]在hdfs中不存在"),
+ UDF_RESOURCE_IS_BOUND(20013, "udf resource file is bound by UDF functions:{0}","udf函数绑定了资源文件[{0}]"),
+ RESOURCE_IS_USED(20014, "resource file is used by process definition","资源文件被上线的流程定义使用了"),
+ PARENT_RESOURCE_NOT_EXIST(20015, "parent resource not exist","父资源文件不存在"),
USER_NO_OPERATION_PERM(30001, "user has no operation privilege", "当前用户没有操作权限"),
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
index 44c2b44ebb..734adb9b71 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessDefinitionService.java
@@ -38,11 +38,9 @@ import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.model.TaskNodeRelation;
import org.apache.dolphinscheduler.common.process.ProcessDag;
import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.thread.Stopper;
-import org.apache.dolphinscheduler.common.utils.CollectionUtils;
-import org.apache.dolphinscheduler.common.utils.DateUtils;
-import org.apache.dolphinscheduler.common.utils.JSONUtils;
-import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.*;
import org.apache.dolphinscheduler.dao.mapper.*;
import org.apache.dolphinscheduler.dao.utils.DagHelper;
@@ -162,6 +160,31 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
+ /**
+ * get resource ids
+ * @param processData process data
+ * @return resource ids
+ */
+ private String getResourceIds(ProcessData processData) {
+ List tasks = processData.getTasks();
+ Set resourceIds = new HashSet<>();
+ for(TaskNode taskNode : tasks){
+ String taskParameter = taskNode.getParams();
+ AbstractParameters params = TaskParametersUtils.getParameters(taskNode.getType(),taskParameter);
+ Set tempSet = params.getResourceFilesList().stream().map(t->t.getId()).collect(Collectors.toSet());
+ resourceIds.addAll(tempSet);
+ }
+
+ StringBuilder sb = new StringBuilder();
+ for(int i : resourceIds) {
+ if (sb.length() > 0) {
+ sb.append(",");
+ }
+ sb.append(i);
+ }
+ return sb.toString();
+ }
+
/**
* query proccess definition list
@@ -946,7 +969,9 @@ public class ProcessDefinitionService extends BaseDAGService {
return result;
}
+
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
+
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//process data check
@@ -1163,6 +1188,7 @@ public class ProcessDefinitionService extends BaseDAGService {
private DAG genDagGraph(ProcessDefinition processDefinition) throws Exception {
String processDefinitionJson = processDefinition.getProcessDefinitionJson();
+
ProcessData processData = JSONUtils.parseObject(processDefinitionJson, ProcessData.class);
//check process data
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
index f46eda757a..ff87aadbc7 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ResourcesService.java
@@ -16,9 +16,15 @@
*/
package org.apache.dolphinscheduler.api.service;
+import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.serializer.SerializerFeature;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.commons.collections.BeanMap;
+import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
+import org.apache.dolphinscheduler.api.dto.resources.filter.ResourceFilter;
+import org.apache.dolphinscheduler.api.dto.resources.visitor.ResourceTreeVisitor;
+import org.apache.dolphinscheduler.api.dto.resources.visitor.Visitor;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
@@ -39,6 +45,7 @@ import org.springframework.web.multipart.MultipartFile;
import java.text.MessageFormat;
import java.util.*;
+import java.util.stream.Collectors;
import static org.apache.dolphinscheduler.common.Constants.*;
@@ -65,6 +72,82 @@ public class ResourcesService extends BaseService {
@Autowired
private ResourceUserMapper resourceUserMapper;
+ @Autowired
+ private ProcessDefinitionMapper processDefinitionMapper;
+
+ /**
+ * create directory
+ *
+ * @param loginUser login user
+ * @param name alias
+ * @param description description
+ * @param type type
+ * @param pid parent id
+ * @param currentDir current directory
+ * @return create directory result
+ */
+ @Transactional(rollbackFor = Exception.class)
+ public Result createDirectory(User loginUser,
+ String name,
+ String description,
+ ResourceType type,
+ int pid,
+ String currentDir) {
+ Result result = new Result();
+ // if hdfs not startup
+ if (!PropertyUtils.getResUploadStartupState()){
+ logger.error("resource upload startup state: {}", PropertyUtils.getResUploadStartupState());
+ putMsg(result, Status.HDFS_NOT_STARTUP);
+ return result;
+ }
+ String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
+
+ if (pid != -1) {
+ Resource parentResource = resourcesMapper.selectById(pid);
+
+ if (parentResource == null) {
+ putMsg(result, Status.PARENT_RESOURCE_NOT_EXIST);
+ return result;
+ }
+
+ if (!hasPerm(loginUser, parentResource.getUserId())) {
+ putMsg(result, Status.USER_NO_OPERATION_PERM);
+ return result;
+ }
+ }
+
+
+ if (checkResourceExists(fullName, 0, type.ordinal())) {
+ logger.error("resource directory {} has exist, can't recreate", fullName);
+ putMsg(result, Status.RESOURCE_EXIST);
+ return result;
+ }
+
+ Date now = new Date();
+
+ Resource resource = new Resource(pid,name,fullName,true,description,name,loginUser.getId(),type,0,now,now);
+
+ try {
+ resourcesMapper.insert(resource);
+
+ putMsg(result, Status.SUCCESS);
+ Map dataMap = new BeanMap(resource);
+ Map resultMap = new HashMap();
+ for (Map.Entry entry: dataMap.entrySet()) {
+ if (!"class".equalsIgnoreCase(entry.getKey().toString())) {
+ resultMap.put(entry.getKey().toString(), entry.getValue());
+ }
+ }
+ result.setData(resultMap);
+ } catch (Exception e) {
+ logger.error("resource already exists, can't recreate ", e);
+ throw new RuntimeException("resource already exists, can't recreate");
+ }
+ //create directory in hdfs
+ createDirecotry(loginUser,fullName,type,result);
+ return result;
+ }
+
/**
* create resource
*
@@ -73,6 +156,8 @@ public class ResourcesService extends BaseService {
* @param desc description
* @param file file
* @param type type
+ * @param pid parent id
+ * @param currentDir current directory
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
@@ -80,7 +165,9 @@ public class ResourcesService extends BaseService {
String name,
String desc,
ResourceType type,
- MultipartFile file) {
+ MultipartFile file,
+ int pid,
+ String currentDir) {
Result result = new Result();
// if hdfs not startup
@@ -123,7 +210,8 @@ public class ResourcesService extends BaseService {
}
// check resoure name exists
- if (checkResourceExists(name, 0, type.ordinal())) {
+ String fullName = currentDir.equals("/") ? String.format("%s%s",currentDir,name):String.format("%s/%s",currentDir,name);
+ if (checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} has exist, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@@ -131,7 +219,9 @@ public class ResourcesService extends BaseService {
Date now = new Date();
- Resource resource = new Resource(name,file.getOriginalFilename(),desc,loginUser.getId(),type,file.getSize(),now,now);
+
+
+ Resource resource = new Resource(pid,name,fullName,false,desc,file.getOriginalFilename(),loginUser.getId(),type,file.getSize(),now,now);
try {
resourcesMapper.insert(resource);
@@ -151,7 +241,7 @@ public class ResourcesService extends BaseService {
}
// fail upload
- if (!upload(loginUser, name, file, type)) {
+ if (!upload(loginUser, fullName, file, type)) {
logger.error("upload resource: {} file: {} failed.", name, file.getOriginalFilename());
putMsg(result, Status.HDFS_OPERATION_ERROR);
throw new RuntimeException(String.format("upload resource: %s file: %s failed.", name, file.getOriginalFilename()));
@@ -162,27 +252,29 @@ public class ResourcesService extends BaseService {
/**
* check resource is exists
*
- * @param alias alias
+ * @param fullName fullName
* @param userId user id
* @param type type
* @return true if resource exists
*/
- private boolean checkResourceExists(String alias, int userId, int type ){
- List resources = resourcesMapper.queryResourceList(alias, userId, type);
- return CollectionUtils.isNotEmpty(resources);
- }
+ private boolean checkResourceExists(String fullName, int userId, int type ){
+ List resources = resourcesMapper.queryResourceList(fullName, userId, type);
+ if (resources != null && resources.size() > 0) {
+ return true;
+ }
+ return false;
+ }
/**
* update resource
- *
- * @param loginUser login user
- * @param name alias
- * @param resourceId resource id
- * @param type resource type
- * @param desc description
- * @return update result code
+ * @param loginUser login user
+ * @param resourceId resource id
+ * @param name name
+ * @param desc description
+ * @param type resource type
+ * @return update result code
*/
@Transactional(rollbackFor = Exception.class)
public Result updateResource(User loginUser,
@@ -216,7 +308,10 @@ public class ResourcesService extends BaseService {
}
//check resource aleady exists
- if (!resource.getAlias().equals(name) && checkResourceExists(name, 0, type.ordinal())) {
+ String originFullName = resource.getFullName();
+
+ String fullName = String.format("%s%s",originFullName.substring(0,originFullName.lastIndexOf("/")+1),name);
+ if (!resource.getAlias().equals(name) && checkResourceExists(fullName, 0, type.ordinal())) {
logger.error("resource {} already exists, can't recreate", name);
putMsg(result, Status.RESOURCE_EXIST);
return result;
@@ -227,25 +322,41 @@ public class ResourcesService extends BaseService {
if (StringUtils.isEmpty(tenantCode)){
return result;
}
-
- //get the file suffix
+ String nameWithSuffix = name;
String originResourceName = resource.getAlias();
- String suffix = originResourceName.substring(originResourceName.lastIndexOf('.'));
+ if (!resource.isDirectory()) {
+ //get the file suffix
- //if the name without suffix then add it ,else use the origin name
- String nameWithSuffix = name;
- if(!name.endsWith(suffix)){
- nameWithSuffix = nameWithSuffix + suffix;
+ String suffix = originResourceName.substring(originResourceName.lastIndexOf("."));
+
+ //if the name without suffix then add it ,else use the origin name
+ if(!name.endsWith(suffix)){
+ nameWithSuffix = nameWithSuffix + suffix;
+ }
}
// updateResource data
+ List childrenResource = listAllChildren(resource);
+ String oldFullName = resource.getFullName();
Date now = new Date();
+
resource.setAlias(nameWithSuffix);
+ resource.setFullName(fullName);
resource.setDescription(desc);
resource.setUpdateTime(now);
try {
resourcesMapper.updateById(resource);
+ if (resource.isDirectory() && CollectionUtils.isNotEmpty(childrenResource)) {
+ List childResourceList = new ArrayList<>();
+ List resourceList = resourcesMapper.listResourceByIds(childrenResource.toArray(new Integer[childrenResource.size()]));
+ childResourceList = resourceList.stream().map(t -> {
+ t.setFullName(t.getFullName().replaceFirst(oldFullName, fullName));
+ t.setUpdateTime(now);
+ return t;
+ }).collect(Collectors.toList());
+ resourcesMapper.batchUpdateResource(childResourceList);
+ }
putMsg(result, Status.SUCCESS);
Map dataMap = new BeanMap(resource);
@@ -267,15 +378,9 @@ public class ResourcesService extends BaseService {
// get file hdfs path
// delete hdfs file by type
- String originHdfsFileName = "";
- String destHdfsFileName = "";
- if (resource.getType().equals(ResourceType.FILE)) {
- originHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, originResourceName);
- destHdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, name);
- } else if (resource.getType().equals(ResourceType.UDF)) {
- originHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, originResourceName);
- destHdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
- }
+ String originHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,originFullName);
+ String destHdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(),tenantCode,fullName);
+
try {
if (HadoopUtils.getInstance().exists(originHdfsFileName)) {
logger.info("hdfs copy {} -> {}", originHdfsFileName, destHdfsFileName);
@@ -303,7 +408,7 @@ public class ResourcesService extends BaseService {
* @param pageSize page size
* @return resource list page
*/
- public Map queryResourceListPaging(User loginUser, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
+ public Map queryResourceListPaging(User loginUser, int direcotryId, ResourceType type, String searchVal, Integer pageNo, Integer pageSize) {
HashMap result = new HashMap<>(5);
Page page = new Page(pageNo, pageSize);
@@ -312,7 +417,7 @@ public class ResourcesService extends BaseService {
userId= 0;
}
IPage resourceIPage = resourcesMapper.queryResourcePaging(page,
- userId, type.ordinal(), searchVal);
+ userId,direcotryId, type.ordinal(), searchVal);
PageInfo pageInfo = new PageInfo(pageNo, pageSize);
pageInfo.setTotalCount((int)resourceIPage.getTotal());
pageInfo.setLists(resourceIPage.getRecords());
@@ -321,17 +426,46 @@ public class ResourcesService extends BaseService {
return result;
}
+ /**
+ * create directory
+ * @param loginUser login user
+ * @param fullName full name
+ * @param type resource type
+ * @param result Result
+ */
+ private void createDirecotry(User loginUser,String fullName,ResourceType type,Result result){
+ // query tenant
+ String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
+ String directoryName = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
+ String resourceRootPath = HadoopUtils.getHdfsDir(type,tenantCode);
+ try {
+ if (!HadoopUtils.getInstance().exists(resourceRootPath)) {
+ createTenantDirIfNotExists(tenantCode);
+ }
+
+ if (!HadoopUtils.getInstance().mkdir(directoryName)) {
+ logger.error("create resource directory {} of hdfs failed",directoryName);
+ putMsg(result,Status.HDFS_OPERATION_ERROR);
+ throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
+ }
+ } catch (Exception e) {
+ logger.error("create resource directory {} of hdfs failed",directoryName);
+ putMsg(result,Status.HDFS_OPERATION_ERROR);
+ throw new RuntimeException(String.format("create resource directory: %s failed.", directoryName));
+ }
+ }
+
/**
* upload file to hdfs
*
- * @param loginUser
- * @param name
- * @param file
+ * @param loginUser login user
+ * @param fullName full name
+ * @param file file
*/
- private boolean upload(User loginUser, String name, MultipartFile file, ResourceType type) {
+ private boolean upload(User loginUser, String fullName, MultipartFile file, ResourceType type) {
// save to local
String fileSuffix = FileUtils.suffix(file.getOriginalFilename());
- String nameSuffix = FileUtils.suffix(name);
+ String nameSuffix = FileUtils.suffix(fullName);
// determine file suffix
if (!(StringUtils.isNotEmpty(fileSuffix) && fileSuffix.equalsIgnoreCase(nameSuffix))) {
@@ -344,15 +478,8 @@ public class ResourcesService extends BaseService {
// save file to hdfs, and delete original file
- String hdfsFilename = "";
- String resourcePath = "";
- if (type.equals(ResourceType.FILE)) {
- hdfsFilename = HadoopUtils.getHdfsFilename(tenantCode, name);
- resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
- } else if (type.equals(ResourceType.UDF)) {
- hdfsFilename = HadoopUtils.getHdfsUdfFilename(tenantCode, name);
- resourcePath = HadoopUtils.getHdfsUdfDir(tenantCode);
- }
+ String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
+ String resourcePath = HadoopUtils.getHdfsDir(type,tenantCode);
try {
// if tenant dir not exists
if (!HadoopUtils.getInstance().exists(resourcePath)) {
@@ -377,13 +504,59 @@ public class ResourcesService extends BaseService {
public Map queryResourceList(User loginUser, ResourceType type) {
Map result = new HashMap<>(5);
- List resourceList;
+
+ Set allResourceList = getAllResources(loginUser, type);
+ Visitor resourceTreeVisitor = new ResourceTreeVisitor(new ArrayList<>(allResourceList));
+ //JSONArray jsonArray = JSON.parseArray(JSON.toJSONString(resourceTreeVisitor.visit().getChildren(), SerializerFeature.SortField));
+ result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
+ putMsg(result,Status.SUCCESS);
+
+ return result;
+ }
+
+ /**
+ * get all resources
+ * @param loginUser login user
+ * @return all resource set
+ */
+ private Set getAllResources(User loginUser, ResourceType type) {
int userId = loginUser.getId();
+ boolean listChildren = true;
if(isAdmin(loginUser)){
userId = 0;
+ listChildren = false;
+ }
+ List resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
+ Set allResourceList = new HashSet<>(resourceList);
+ if (listChildren) {
+ Set authorizedIds = new HashSet<>();
+ List authorizedDirecoty = resourceList.stream().filter(t->t.getUserId() != loginUser.getId() && t.isDirectory()).collect(Collectors.toList());
+ if (CollectionUtils.isNotEmpty(authorizedDirecoty)) {
+ for(Resource resource : authorizedDirecoty){
+ authorizedIds.addAll(listAllChildren(resource));
+ }
+ List childrenResources = resourcesMapper.listResourceByIds(authorizedIds.toArray(new Integer[authorizedIds.size()]));
+ allResourceList.addAll(childrenResources);
+ }
}
- resourceList = resourcesMapper.queryResourceListAuthored(userId, type.ordinal());
- result.put(Constants.DATA_LIST, resourceList);
+ return allResourceList;
+ }
+
+ /**
+ * query resource list
+ *
+ * @param loginUser login user
+ * @param type resource type
+ * @return resource list
+ */
+ public Map queryResourceJarList(User loginUser, ResourceType type) {
+
+ Map result = new HashMap<>(5);
+
+ Set allResourceList = getAllResources(loginUser, type);
+ List resources = new ResourceFilter(".jar",new ArrayList<>(allResourceList)).filter();
+ Visitor resourceTreeVisitor = new ResourceTreeVisitor(resources);
+ result.put(Constants.DATA_LIST, resourceTreeVisitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
@@ -419,23 +592,51 @@ public class ResourcesService extends BaseService {
putMsg(result, Status.USER_NO_OPERATION_PERM);
return result;
}
+ //if resource type is UDF, need to check whether it is bound by a UDF function
+ if (resource.getType() == (ResourceType.UDF)) {
+ List udfFuncs = udfFunctionMapper.listUdfByResourceId(new int[]{resourceId});
+ if (CollectionUtils.isNotEmpty(udfFuncs)) {
+ logger.error("can't be deleted,because it is bound by UDF functions:{}",udfFuncs.toString());
+ putMsg(result,Status.UDF_RESOURCE_IS_BOUND,udfFuncs.get(0).getFuncName());
+ return result;
+ }
+ }
- Tenant tenant = tenantMapper.queryById(loginUser.getTenantId());
- if (tenant == null){
- putMsg(result, Status.TENANT_NOT_EXIST);
+ String tenantCode = getTenantCode(resource.getUserId(),result);
+ if (StringUtils.isEmpty(tenantCode)){
+ return result;
+ }
+
+ // get all resource id of process definitions those is released
+ Map> resourceProcessMap = getResourceProcessMap();
+ Set resourceIdSet = resourceProcessMap.keySet();
+ // get all children of the resource
+ List allChildren = listAllChildren(resource);
+
+ if (resourceIdSet.contains(resource.getPid())) {
+ logger.error("can't be deleted,because it is used of process definition");
+ putMsg(result, Status.RESOURCE_IS_USED);
+ return result;
+ }
+ resourceIdSet.retainAll(allChildren);
+ if (CollectionUtils.isNotEmpty(resourceIdSet)) {
+ logger.error("can't be deleted,because it is used of process definition");
+ for (Integer resId : resourceIdSet) {
+ logger.error("resource id:{} is used of process definition {}",resId,resourceProcessMap.get(resId));
+ }
+ putMsg(result, Status.RESOURCE_IS_USED);
return result;
}
- String hdfsFilename = "";
- // delete hdfs file by type
- String tenantCode = tenant.getTenantCode();
- hdfsFilename = getHdfsFileName(resource, tenantCode, hdfsFilename);
+ // get hdfs file by type
+ String hdfsFilename = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getFullName());
//delete data in database
- resourcesMapper.deleteById(resourceId);
+ resourcesMapper.deleteIds(allChildren.toArray(new Integer[allChildren.size()]));
resourceUserMapper.deleteResourceUser(0, resourceId);
+
//delete file on hdfs
- HadoopUtils.getInstance().delete(hdfsFilename, false);
+ HadoopUtils.getInstance().delete(hdfsFilename, true);
putMsg(result, Status.SUCCESS);
return result;
@@ -444,15 +645,15 @@ public class ResourcesService extends BaseService {
/**
* verify resource by name and type
* @param loginUser login user
- * @param name resource alias
- * @param type resource type
+ * @param fullName resource full name
+ * @param type resource type
* @return true if the resource name not exists, otherwise return false
*/
- public Result verifyResourceName(String name, ResourceType type,User loginUser) {
+ public Result verifyResourceName(String fullName, ResourceType type,User loginUser) {
Result result = new Result();
putMsg(result, Status.SUCCESS);
- if (checkResourceExists(name, 0, type.ordinal())) {
- logger.error("resource type:{} name:{} has exist, can't create again.", type, name);
+ if (checkResourceExists(fullName, 0, type.ordinal())) {
+ logger.error("resource type:{} name:{} has exist, can't create again.", type, fullName);
putMsg(result, Status.RESOURCE_EXIST);
} else {
// query tenant
@@ -461,9 +662,9 @@ public class ResourcesService extends BaseService {
String tenantCode = tenant.getTenantCode();
try {
- String hdfsFilename = getHdfsFileName(type,tenantCode,name);
+ String hdfsFilename = HadoopUtils.getHdfsFileName(type,tenantCode,fullName);
if(HadoopUtils.getInstance().exists(hdfsFilename)){
- logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, name,hdfsFilename);
+ logger.error("resource type:{} name:{} has exist in hdfs {}, can't create again.", type, fullName,hdfsFilename);
putMsg(result, Status.RESOURCE_FILE_EXIST,hdfsFilename);
}
@@ -480,6 +681,48 @@ public class ResourcesService extends BaseService {
return result;
}
+ /**
+ * verify resource by full name or pid and type
+ * @param fullName resource full name
+ * @param id resource id
+ * @param type resource type
+ * @return true if the resource full name or pid does not exist, otherwise return false
+ */
+ public Result queryResource(String fullName,Integer id,ResourceType type) {
+ Result result = new Result();
+ if (StringUtils.isBlank(fullName) && id == null) {
+ logger.error("You must input one of fullName and pid");
+ putMsg(result, Status.REQUEST_PARAMS_NOT_VALID_ERROR);
+ return result;
+ }
+ if (StringUtils.isNotBlank(fullName)) {
+ List resourceList = resourcesMapper.queryResource(fullName,type.ordinal());
+ if (CollectionUtils.isEmpty(resourceList)) {
+ logger.error("resource file not exist, resource full name {} ", fullName);
+ putMsg(result, Status.RESOURCE_NOT_EXIST);
+ return result;
+ }
+ putMsg(result, Status.SUCCESS);
+ result.setData(resourceList.get(0));
+ } else {
+ Resource resource = resourcesMapper.selectById(id);
+ if (resource == null) {
+ logger.error("resource file not exist, resource id {}", id);
+ putMsg(result, Status.RESOURCE_NOT_EXIST);
+ return result;
+ }
+ Resource parentResource = resourcesMapper.selectById(resource.getPid());
+ if (parentResource == null) {
+ logger.error("parent resource file not exist, resource id {}", id);
+ putMsg(result, Status.RESOURCE_NOT_EXIST);
+ return result;
+ }
+ putMsg(result, Status.SUCCESS);
+ result.setData(parentResource);
+ }
+ return result;
+ }
+
/**
* view resource file online
*
@@ -501,7 +744,7 @@ public class ResourcesService extends BaseService {
// get resource by id
Resource resource = resourcesMapper.selectById(resourceId);
if (resource == null) {
- logger.error("resouce file not exist, resource id {}", resourceId);
+ logger.error("resource file not exist, resource id {}", resourceId);
putMsg(result, Status.RESOURCE_NOT_EXIST);
return result;
}
@@ -511,7 +754,7 @@ public class ResourcesService extends BaseService {
if (StringUtils.isNotEmpty(resourceViewSuffixs)) {
List strList = Arrays.asList(resourceViewSuffixs.split(","));
if (!strList.contains(nameSuffix)) {
- logger.error("resouce suffix {} not support view, resource id {}", nameSuffix, resourceId);
+ logger.error("resource suffix {} not support view, resource id {}", nameSuffix, resourceId);
putMsg(result, Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW);
return result;
}
@@ -523,7 +766,7 @@ public class ResourcesService extends BaseService {
}
// hdfs path
- String hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
+ String hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resource.getFullName());
logger.info("resource hdfs path is {} ", hdfsFileName);
try {
if(HadoopUtils.getInstance().exists(hdfsFileName)){
@@ -559,7 +802,7 @@ public class ResourcesService extends BaseService {
* @return create result code
*/
@Transactional(rollbackFor = Exception.class)
- public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content) {
+ public Result onlineCreateResource(User loginUser, ResourceType type, String fileName, String fileSuffix, String desc, String content,int pid,String currentDirectory) {
Result result = new Result();
// if resource upload startup
if (!PropertyUtils.getResUploadStartupState()){
@@ -581,15 +824,16 @@ public class ResourcesService extends BaseService {
}
String name = fileName.trim() + "." + nameSuffix;
+ String fullName = currentDirectory.equals("/") ? String.format("%s%s",currentDirectory,name):String.format("%s/%s",currentDirectory,name);
- result = verifyResourceName(name,type,loginUser);
+ result = verifyResourceName(fullName,type,loginUser);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
return result;
}
// save data
Date now = new Date();
- Resource resource = new Resource(name,name,desc,loginUser.getId(),type,content.getBytes().length,now,now);
+ Resource resource = new Resource(pid,name,fullName,false,desc,name,loginUser.getId(),type,content.getBytes().length,now,now);
resourcesMapper.insert(resource);
@@ -605,7 +849,7 @@ public class ResourcesService extends BaseService {
String tenantCode = tenantMapper.queryById(loginUser.getTenantId()).getTenantCode();
- result = uploadContentToHdfs(name, tenantCode, content);
+ result = uploadContentToHdfs(fullName, tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
@@ -657,7 +901,7 @@ public class ResourcesService extends BaseService {
resourcesMapper.updateById(resource);
- result = uploadContentToHdfs(resource.getAlias(), tenantCode, content);
+ result = uploadContentToHdfs(resource.getFullName(), tenantCode, content);
if (!result.getCode().equals(Status.SUCCESS.getCode())) {
throw new RuntimeException(result.getMsg());
}
@@ -665,10 +909,10 @@ public class ResourcesService extends BaseService {
}
/**
- * @param resourceName
- * @param tenantCode
- * @param content
- * @return
+ * @param resourceName resource name
+ * @param tenantCode tenant code
+ * @param content content
+ * @return result
*/
private Result uploadContentToHdfs(String resourceName, String tenantCode, String content) {
Result result = new Result();
@@ -684,8 +928,8 @@ public class ResourcesService extends BaseService {
return result;
}
- // get file hdfs path
- hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resourceName);
+ // get resource file hdfs path
+ hdfsFileName = HadoopUtils.getHdfsResourceFileName(tenantCode, resourceName);
String resourcePath = HadoopUtils.getHdfsResDir(tenantCode);
logger.info("resource hdfs path is {} ", hdfsFileName);
@@ -729,11 +973,14 @@ public class ResourcesService extends BaseService {
logger.error("download file not exist, resource id {}", resourceId);
return null;
}
+ if (resource.isDirectory()) {
+ logger.error("resource id {} is directory,can't download it", resourceId);
+ throw new RuntimeException("cant't download directory");
+ }
User user = userMapper.queryDetailsById(resource.getUserId());
String tenantCode = tenantMapper.queryById(user.getTenantId()).getTenantCode();
- String hdfsFileName = "";
- hdfsFileName = getHdfsFileName(resource, tenantCode, hdfsFileName);
+ String hdfsFileName = HadoopUtils.getHdfsFileName(resource.getType(), tenantCode, resource.getAlias());
String localFileName = FileUtils.getDownloadFilename(resource.getAlias());
logger.info("resource hdfs path is {} ", hdfsFileName);
@@ -743,6 +990,33 @@ public class ResourcesService extends BaseService {
}
+ /**
+ * list all file
+ *
+ * @param loginUser login user
+ * @param userId user id
+ * @return unauthorized result code
+ */
+ public Map authorizeResourceTree(User loginUser, Integer userId) {
+
+ Map result = new HashMap<>();
+ if (checkAdmin(loginUser, result)) {
+ return result;
+ }
+ List resourceList = resourcesMapper.queryResourceExceptUserId(userId);
+ List list ;
+ if (CollectionUtils.isNotEmpty(resourceList)) {
+ Visitor visitor = new ResourceTreeVisitor(resourceList);
+ list = visitor.visit().getChildren();
+ }else {
+ list = new ArrayList<>(0);
+ }
+
+ result.put(Constants.DATA_LIST, list);
+ putMsg(result,Status.SUCCESS);
+ return result;
+ }
+
/**
* unauthorized file
*
@@ -757,8 +1031,8 @@ public class ResourcesService extends BaseService {
return result;
}
List resourceList = resourcesMapper.queryResourceExceptUserId(userId);
- List list ;
- if (CollectionUtils.isNotEmpty(resourceList)) {
+ List list ;
+ if (resourceList != null && resourceList.size() > 0) {
Set resourceSet = new HashSet<>(resourceList);
List authedResourceList = resourcesMapper.queryAuthorizedResourceList(userId);
@@ -767,15 +1041,12 @@ public class ResourcesService extends BaseService {
}else {
list = new ArrayList<>(0);
}
-
- result.put(Constants.DATA_LIST, list);
+ Visitor visitor = new ResourceTreeVisitor(list);
+ result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
-
-
-
/**
* unauthorized udf function
*
@@ -841,46 +1112,15 @@ public class ResourcesService extends BaseService {
return result;
}
List authedResources = resourcesMapper.queryAuthorizedResourceList(userId);
-
- result.put(Constants.DATA_LIST, authedResources);
+ Visitor visitor = new ResourceTreeVisitor(authedResources);
+ logger.info(JSON.toJSONString(visitor.visit(), SerializerFeature.SortField));
+ String jsonTreeStr = JSON.toJSONString(visitor.visit().getChildren(), SerializerFeature.SortField);
+ logger.info(jsonTreeStr);
+ result.put(Constants.DATA_LIST, visitor.visit().getChildren());
putMsg(result,Status.SUCCESS);
return result;
}
- /**
- * get hdfs file name
- *
- * @param resource resource
- * @param tenantCode tenant code
- * @param hdfsFileName hdfs file name
- * @return hdfs file name
- */
- private String getHdfsFileName(Resource resource, String tenantCode, String hdfsFileName) {
- if (resource.getType().equals(ResourceType.FILE)) {
- hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, resource.getAlias());
- } else if (resource.getType().equals(ResourceType.UDF)) {
- hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, resource.getAlias());
- }
- return hdfsFileName;
- }
-
- /**
- * get hdfs file name
- *
- * @param resourceType resource type
- * @param tenantCode tenant code
- * @param hdfsFileName hdfs file name
- * @return hdfs file name
- */
- private String getHdfsFileName(ResourceType resourceType, String tenantCode, String hdfsFileName) {
- if (resourceType.equals(ResourceType.FILE)) {
- hdfsFileName = HadoopUtils.getHdfsFilename(tenantCode, hdfsFileName);
- } else if (resourceType.equals(ResourceType.UDF)) {
- hdfsFileName = HadoopUtils.getHdfsUdfFilename(tenantCode, hdfsFileName);
- }
- return hdfsFileName;
- }
-
/**
* get authorized resource list
*
@@ -920,4 +1160,69 @@ public class ResourcesService extends BaseService {
return tenant.getTenantCode();
}
+ /**
+ * list all children id
+ * @param resource resource
+ * @return all children id
+ */
+ List listAllChildren(Resource resource){
+ List childList = new ArrayList<>();
+ if (resource.getId() != -1) {
+ childList.add(resource.getId());
+ }
+
+ if(resource.isDirectory()){
+ listAllChildren(resource.getId(),childList);
+ }
+ return childList;
+ }
+
+ /**
+ * list all children id
+ * @param resourceId resource id
+ * @param childList child list
+ */
+ void listAllChildren(int resourceId,List childList){
+
+ List children = resourcesMapper.listChildren(resourceId);
+ for(int chlidId:children){
+ childList.add(chlidId);
+ listAllChildren(chlidId,childList);
+ }
+ }
+
+ /**
+ * get resource process map: key is resource id, value is the set of process definition ids
+ * @return resource process definition map
+ */
+ private Map> getResourceProcessMap(){
+ Map map = new HashMap<>();
+ Map> result = new HashMap<>();
+ List> list = processDefinitionMapper.listResources();
+ if (CollectionUtils.isNotEmpty(list)) {
+ for (Map tempMap : list) {
+
+ map.put((Integer) tempMap.get("id"), (String)tempMap.get("resource_ids"));
+ }
+ }
+
+ for (Map.Entry entry : map.entrySet()) {
+ Integer mapKey = entry.getKey();
+ String[] arr = entry.getValue().split(",");
+ Set mapValues = Arrays.stream(arr).map(Integer::parseInt).collect(Collectors.toSet());
+ for (Integer value : mapValues) {
+ if (result.containsKey(value)) {
+ Set set = result.get(value);
+ set.add(mapKey);
+ result.put(value, set);
+ } else {
+ Set set = new HashSet<>();
+ set.add(mapKey);
+ result.put(value, set);
+ }
+ }
+ }
+ return result;
+ }
+
}
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java
index 249c7ec8df..8a0bf748bb 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/UdfFuncService.java
@@ -118,7 +118,7 @@ public class UdfFuncService extends BaseService{
}
udf.setDescription(desc);
udf.setResourceId(resourceId);
- udf.setResourceName(resource.getAlias());
+ udf.setResourceName(resource.getFullName());
udf.setType(type);
udf.setCreateTime(now);
@@ -226,7 +226,7 @@ public class UdfFuncService extends BaseService{
}
udf.setDescription(desc);
udf.setResourceId(resourceId);
- udf.setResourceName(resource.getAlias());
+ udf.setResourceName(resource.getFullName());
udf.setType(type);
udf.setUpdateTime(now);
diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
new file mode 100644
index 0000000000..8a4a16c4f0
--- /dev/null
+++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/filter/ResourceFilterTest.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.dolphinscheduler.api.dto.resources.filter;
+
+import org.apache.dolphinscheduler.dao.entity.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * resource filter test
+ */
+public class ResourceFilterTest {
+ private static Logger logger = LoggerFactory.getLogger(ResourceFilterTest.class);
+ @Test
+ public void filterTest(){
+ List allList = new ArrayList<>();
+
+ Resource resource1 = new Resource(3,-1,"b","/b",true);
+ Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
+ Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
+ Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
+ Resource resource5 = new Resource(7,-1,"b2","/b2",true);
+ Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
+ Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
+ allList.add(resource1);
+ allList.add(resource2);
+ allList.add(resource3);
+ allList.add(resource4);
+ allList.add(resource5);
+ allList.add(resource6);
+ allList.add(resource7);
+
+
+ ResourceFilter resourceFilter = new ResourceFilter(".jar",allList);
+ List resourceList = resourceFilter.filter();
+ Assert.assertNotNull(resourceList);
+ resourceList.stream().forEach(t-> logger.info(t.toString()));
+ }
+}
\ No newline at end of file
diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java
new file mode 100644
index 0000000000..d1f8a12012
--- /dev/null
+++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/dto/resources/visitor/ResourceTreeVisitorTest.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.dolphinscheduler.api.dto.resources.visitor;
+
+import org.apache.dolphinscheduler.api.dto.resources.ResourceComponent;
+import org.apache.dolphinscheduler.dao.entity.Resource;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * resource tree visitor test
+ */
+public class ResourceTreeVisitorTest {
+
+ @Test
+ public void visit() throws Exception {
+ List resourceList = new ArrayList<>();
+
+ Resource resource1 = new Resource(3,-1,"b","/b",true);
+ Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
+ Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
+ Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
+ Resource resource5 = new Resource(7,-1,"b2","/b2",true);
+ Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
+ Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
+ resourceList.add(resource1);
+ resourceList.add(resource2);
+ resourceList.add(resource3);
+ resourceList.add(resource4);
+ resourceList.add(resource5);
+ resourceList.add(resource6);
+ resourceList.add(resource7);
+
+ ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
+ ResourceComponent resourceComponent = resourceTreeVisitor.visit();
+ Assert.assertNotNull(resourceComponent.getChildren());
+ }
+
+ @Test
+ public void rootNode() throws Exception {
+ List resourceList = new ArrayList<>();
+
+ Resource resource1 = new Resource(3,-1,"b","/b",true);
+ Resource resource2 = new Resource(4,2,"a1.txt","/a/a1.txt",false);
+ Resource resource3 = new Resource(5,3,"b1.txt","/b/b1.txt",false);
+ Resource resource4 = new Resource(6,3,"b2.jar","/b/b2.jar",false);
+ Resource resource5 = new Resource(7,-1,"b2","/b2",true);
+ Resource resource6 = new Resource(8,-1,"b2","/b/b2",true);
+ Resource resource7 = new Resource(9,8,"c2.jar","/b/b2/c2.jar",false);
+ resourceList.add(resource1);
+ resourceList.add(resource2);
+ resourceList.add(resource3);
+ resourceList.add(resource4);
+ resourceList.add(resource5);
+ resourceList.add(resource6);
+ resourceList.add(resource7);
+
+ ResourceTreeVisitor resourceTreeVisitor = new ResourceTreeVisitor(resourceList);
+ Assert.assertTrue(resourceTreeVisitor.rootNode(resource1));
+ Assert.assertTrue(resourceTreeVisitor.rootNode(resource2));
+ Assert.assertFalse(resourceTreeVisitor.rootNode(resource3));
+
+ }
+
+}
\ No newline at end of file
diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
index 6d07ebd99c..d73eba8bdc 100644
--- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
+++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/service/ResourcesServiceTest.java
@@ -24,10 +24,7 @@ import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.UserType;
-import org.apache.dolphinscheduler.common.utils.CollectionUtils;
-import org.apache.dolphinscheduler.common.utils.FileUtils;
-import org.apache.dolphinscheduler.common.utils.HadoopUtils;
-import org.apache.dolphinscheduler.common.utils.PropertyUtils;
+import org.apache.dolphinscheduler.common.utils.*;
import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.Tenant;
import org.apache.dolphinscheduler.dao.entity.UdfFunc;
@@ -40,6 +37,7 @@ import org.junit.runner.RunWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.Mockito;
+import org.omg.CORBA.Any;
import org.powermock.api.mockito.PowerMockito;
import org.powermock.core.classloader.annotations.PowerMockIgnore;
import org.powermock.core.classloader.annotations.PrepareForTest;
@@ -73,6 +71,8 @@ public class ResourcesServiceTest {
private UserMapper userMapper;
@Mock
private UdfFuncMapper udfFunctionMapper;
+ @Mock
+ private ProcessDefinitionMapper processDefinitionMapper;
@Before
public void setUp() {
@@ -96,14 +96,14 @@ public class ResourcesServiceTest {
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
User user = new User();
//HDFS_NOT_STARTUP
- Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null);
+ Result result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,null,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_FILE_IS_EMPTY
MockMultipartFile mockMultipartFile = new MockMultipartFile("test.pdf",new String().getBytes());
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
- result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
+ result = resourcesService.createResource(user,"ResourcesServiceTest","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_FILE_IS_EMPTY.getMsg(),result.getMsg());
@@ -111,31 +111,42 @@ public class ResourcesServiceTest {
mockMultipartFile = new MockMultipartFile("test.pdf","test.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("test.pdf")).thenReturn("pdf");
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.jar")).thenReturn("jar");
- result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile);
+ result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_FORBID_CHANGE.getMsg(),result.getMsg());
//UDF_RESOURCE_SUFFIX_NOT_JAR
mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.pdf","ResourcesServiceTest.pdf","pdf",new String("test").getBytes());
PowerMockito.when(FileUtils.suffix("ResourcesServiceTest.pdf")).thenReturn("pdf");
- result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
+ result = resourcesService.createResource(user,"ResourcesServiceTest.pdf","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile,-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.UDF_RESOURCE_SUFFIX_NOT_JAR.getMsg(),result.getMsg());
- //UDF_RESOURCE_SUFFIX_NOT_JAR
- Mockito.when(tenantMapper.queryById(0)).thenReturn(getTenant());
- Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(getResourceList());
- mockMultipartFile = new MockMultipartFile("ResourcesServiceTest.jar","ResourcesServiceTest.jar","pdf",new String("test").getBytes());
- result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
+ }
+
+ @Test
+ public void testCreateDirecotry(){
+
+ PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(false);
+ User user = new User();
+ //HDFS_NOT_STARTUP
+ Result result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
logger.info(result.toString());
- Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
+ Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
- //SUCCESS
- Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest.jar", 0, 1)).thenReturn(new ArrayList<>());
- result = resourcesService.createResource(user,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.UDF,mockMultipartFile);
+ //PARENT_RESOURCE_NOT_EXIST
+ PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
+ Mockito.when(resourcesMapper.selectById(Mockito.anyInt())).thenReturn(null);
+ result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,1,"/");
logger.info(result.toString());
- Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
+ Assert.assertEquals(Status.PARENT_RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
+ //RESOURCE_EXIST
+ PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
+ Mockito.when(resourcesMapper.queryResourceList("/directoryTest", 0, 0)).thenReturn(getResourceList());
+ result = resourcesService.createDirectory(user,"directoryTest","directory test",ResourceType.FILE,-1,"/");
+ logger.info(result.toString());
+ Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
}
@@ -163,41 +174,46 @@ public class ResourcesServiceTest {
//SUCCESS
user.setId(1);
- result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest.jar",ResourceType.FILE);
+ Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
+ Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
+
+ result = resourcesService.updateResource(user,1,"ResourcesServiceTest.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
//RESOURCE_EXIST
- Mockito.when(resourcesMapper.queryResourceList("ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
- result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.FILE);
+ Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest1.jar", 0, 0)).thenReturn(getResourceList());
+ result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.FILE);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(),result.getMsg());
//USER_NOT_EXIST
- result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
+ Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(null);
+ result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertTrue(Status.USER_NOT_EXIST.getCode() == result.getCode());
//TENANT_NOT_EXIST
Mockito.when(userMapper.queryDetailsById(1)).thenReturn(getUser());
- result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
+ Mockito.when(tenantMapper.queryById(Mockito.anyInt())).thenReturn(null);
+ result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(),result.getMsg());
//RESOURCE_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
- PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test1");
+ PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test1");
try {
Mockito.when(hadoopUtils.exists("test")).thenReturn(true);
} catch (IOException e) {
e.printStackTrace();
}
- result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
+ result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(),result.getMsg());
//SUCCESS
- PowerMockito.when(HadoopUtils.getHdfsFilename(Mockito.any(), Mockito.any())).thenReturn("test");
+ PowerMockito.when(HadoopUtils.getHdfsResourceFileName(Mockito.any(), Mockito.any())).thenReturn("test");
result = resourcesService.updateResource(user,1,"ResourcesServiceTest1.jar","ResourcesServiceTest1.jar",ResourceType.UDF);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@@ -212,8 +228,8 @@ public class ResourcesServiceTest {
resourcePage.setTotal(1);
resourcePage.setRecords(getResourceList());
Mockito.when(resourcesMapper.queryResourcePaging(Mockito.any(Page.class),
- Mockito.eq(0), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
- Map result = resourcesService.queryResourceListPaging(loginUser,ResourceType.FILE,"test",1,10);
+ Mockito.eq(0),Mockito.eq(-1), Mockito.eq(0), Mockito.eq("test"))).thenReturn(resourcePage);
+ Map result = resourcesService.queryResourceListPaging(loginUser,-1,ResourceType.FILE,"test",1,10);
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS, result.get(Constants.STATUS));
PageInfo pageInfo = (PageInfo) result.get(Constants.DATA_LIST);
@@ -263,6 +279,7 @@ public class ResourcesServiceTest {
//TENANT_NOT_EXIST
loginUser.setUserType(UserType.ADMIN_USER);
loginUser.setTenantId(2);
+ Mockito.when(userMapper.queryDetailsById(Mockito.anyInt())).thenReturn(loginUser);
result = resourcesService.delete(loginUser,1);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@@ -285,14 +302,20 @@ public class ResourcesServiceTest {
User user = new User();
user.setId(1);
- Mockito.when(resourcesMapper.queryResourceList("test", 0, 0)).thenReturn(getResourceList());
- Result result = resourcesService.verifyResourceName("test",ResourceType.FILE,user);
+ Mockito.when(resourcesMapper.queryResourceList("/ResourcesServiceTest.jar", 0, 0)).thenReturn(getResourceList());
+ Result result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_EXIST.getMsg(), result.getMsg());
//TENANT_NOT_EXIST
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
- result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
+ String unExistFullName = "/test.jar";
+ try {
+ Mockito.when(hadoopUtils.exists(unExistFullName)).thenReturn(false);
+ } catch (IOException e) {
+ logger.error("hadoop error",e);
+ }
+ result = resourcesService.verifyResourceName("/test.jar",ResourceType.FILE,user);
logger.info(result.toString());
Assert.assertEquals(Status.TENANT_NOT_EXIST.getMsg(), result.getMsg());
@@ -304,10 +327,10 @@ public class ResourcesServiceTest {
} catch (IOException e) {
logger.error("hadoop error",e);
}
- PowerMockito.when(HadoopUtils.getHdfsFilename("123", "test1")).thenReturn("test");
- result = resourcesService.verifyResourceName("test1",ResourceType.FILE,user);
+ PowerMockito.when(HadoopUtils.getHdfsResourceFileName("123", "test1")).thenReturn("test");
+ result = resourcesService.verifyResourceName("/ResourcesServiceTest.jar",ResourceType.FILE,user);
logger.info(result.toString());
- Assert.assertTrue(Status.RESOURCE_FILE_EXIST.getCode()==result.getCode());
+ Assert.assertTrue(Status.RESOURCE_EXIST.getCode()==result.getCode());
//SUCCESS
result = resourcesService.verifyResourceName("test2",ResourceType.FILE,user);
@@ -389,14 +412,14 @@ public class ResourcesServiceTest {
PowerMockito.when(HadoopUtils.getHdfsUdfDir("udfDir")).thenReturn("udfDir");
User user = getUser();
//HDFS_NOT_STARTUP
- Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
+ Result result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.HDFS_NOT_STARTUP.getMsg(),result.getMsg());
//RESOURCE_SUFFIX_NOT_SUPPORT_VIEW
PowerMockito.when(PropertyUtils.getResUploadStartupState()).thenReturn(true);
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("class");
- result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
+ result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_SUFFIX_NOT_SUPPORT_VIEW.getMsg(),result.getMsg());
@@ -404,7 +427,7 @@ public class ResourcesServiceTest {
try {
PowerMockito.when(FileUtils.getResourceViewSuffixs()).thenReturn("jar");
Mockito.when(tenantMapper.queryById(1)).thenReturn(getTenant());
- result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content");
+ result = resourcesService.onlineCreateResource(user, ResourceType.FILE, "test", "jar", "desc", "content",-1,"/");
}catch (RuntimeException ex){
logger.info(result.toString());
Assert.assertEquals(Status.RESOURCE_NOT_EXIST.getMsg(), ex.getMessage());
@@ -413,7 +436,7 @@ public class ResourcesServiceTest {
//SUCCESS
Mockito.when(FileUtils.getUploadFilename(Mockito.anyString(), Mockito.anyString())).thenReturn("test");
PowerMockito.when(FileUtils.writeContent2File(Mockito.anyString(), Mockito.anyString())).thenReturn(true);
- result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content");
+ result = resourcesService.onlineCreateResource(user,ResourceType.FILE,"test","jar","desc","content",-1,"/");
logger.info(result.toString());
Assert.assertEquals(Status.SUCCESS.getMsg(),result.getMsg());
@@ -584,13 +607,26 @@ public class ResourcesServiceTest {
private Resource getResource(){
Resource resource = new Resource();
+ resource.setPid(-1);
resource.setUserId(1);
resource.setDescription("ResourcesServiceTest.jar");
resource.setAlias("ResourcesServiceTest.jar");
+ resource.setFullName("/ResourcesServiceTest.jar");
resource.setType(ResourceType.FILE);
return resource;
}
+ private Resource getUdfResource(){
+
+ Resource resource = new Resource();
+ resource.setUserId(1);
+ resource.setDescription("udfTest");
+ resource.setAlias("udfTest.jar");
+ resource.setFullName("/udfTest.jar");
+ resource.setType(ResourceType.UDF);
+ return resource;
+ }
+
private UdfFunc getUdfFunc(){
UdfFunc udfFunc = new UdfFunc();
diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java
index 308ed8e9b6..ccc231fcf6 100644
--- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java
+++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/utils/CheckUtilsTest.java
@@ -43,6 +43,7 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.util.ArrayList;
import java.util.Map;
import static org.junit.Assert.*;
@@ -173,7 +174,11 @@ public class CheckUtilsTest {
// MapreduceParameters
MapreduceParameters mapreduceParameters = new MapreduceParameters();
assertFalse(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));
- mapreduceParameters.setMainJar(new ResourceInfo());
+
+ ResourceInfo resourceInfoMapreduce = new ResourceInfo();
+ resourceInfoMapreduce.setId(1);
+ resourceInfoMapreduce.setRes("");
+ mapreduceParameters.setMainJar(resourceInfoMapreduce);
mapreduceParameters.setProgramType(ProgramType.JAVA);
assertTrue(CheckUtils.checkTaskNodeParameters(JSONUtils.toJsonString(mapreduceParameters), TaskType.MR.toString()));
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java
index 1c371e799e..633f5f9623 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/AuthorizationType.java
@@ -23,13 +23,17 @@ import com.baomidou.mybatisplus.annotation.EnumValue;
*/
public enum AuthorizationType {
/**
- * 0 RESOURCE_FILE;
+ * 0 RESOURCE_FILE_ID;
+ * 0 RESOURCE_FILE_NAME;
+ * 1 UDF_FILE;
* 1 DATASOURCE;
* 2 UDF;
*/
- RESOURCE_FILE(0, "resource file"),
- DATASOURCE(1, "data source"),
- UDF(2, "udf function");
+ RESOURCE_FILE_ID(0, "resource file id"),
+ RESOURCE_FILE_NAME(1, "resource file name"),
+ UDF_FILE(2, "udf file"),
+ DATASOURCE(3, "data source"),
+ UDF(4, "udf function");
AuthorizationType(int code, String descp){
this.code = code;
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java
index 3c95ac648b..a7fc0839eb 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/process/ResourceInfo.java
@@ -23,6 +23,16 @@ public class ResourceInfo {
/**
* res the name of the resource that was uploaded
*/
+ private int id;
+
+ public int getId() {
+ return id;
+ }
+
+ public void setId(int id) {
+ this.id = id;
+ }
+
private String res;
public String getRes() {
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java
index 2d0322a6d7..ae78caf881 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/AbstractParameters.java
@@ -17,6 +17,7 @@
package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import java.util.LinkedHashMap;
import java.util.List;
@@ -31,7 +32,7 @@ public abstract class AbstractParameters implements IParameters {
public abstract boolean checkParameters();
@Override
- public abstract List getResourceFilesList();
+ public abstract List getResourceFilesList();
/**
* local parameters
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java
index 8fb49eb1fa..88a2b54761 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/IParameters.java
@@ -16,6 +16,8 @@
*/
package org.apache.dolphinscheduler.common.task;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
+
import java.util.List;
/**
@@ -34,5 +36,5 @@ public interface IParameters {
*
* @return resource files list
*/
- List getResourceFilesList();
+ List getResourceFilesList();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java
index 5714b5ef3e..7f0f2c8079 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/conditions/ConditionsParameters.java
@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.conditions;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.List;
@@ -41,7 +42,7 @@ public class ConditionsParameters extends AbstractParameters {
}
@Override
- public List getResourceFilesList() {
+ public List getResourceFilesList() {
return null;
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java
index f153360d63..872b3aa174 100755
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/datax/DataxParameters.java
@@ -20,6 +20,7 @@ import java.util.ArrayList;
import java.util.List;
import org.apache.commons.lang.StringUtils;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
/**
@@ -198,7 +199,7 @@ public class DataxParameters extends AbstractParameters {
}
@Override
- public List getResourceFilesList() {
+ public List getResourceFilesList() {
return new ArrayList<>();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java
index 9ff1405722..5f2e0e1853 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/dependent/DependentParameters.java
@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task.dependent;
import org.apache.dolphinscheduler.common.enums.DependentRelation;
import org.apache.dolphinscheduler.common.model.DependentTaskModel;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.ArrayList;
@@ -36,7 +37,7 @@ public class DependentParameters extends AbstractParameters {
}
@Override
- public List getResourceFilesList() {
+ public List getResourceFilesList() {
return new ArrayList<>();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java
index 1fbd9ab354..05cbb1d794 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/flink/FlinkParameters.java
@@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.flink;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;
-import java.util.Collections;
+import java.util.ArrayList;
import java.util.List;
-import java.util.stream.Collectors;
/**
* spark parameters
@@ -50,35 +50,35 @@ public class FlinkParameters extends AbstractParameters {
private String mainArgs;
/**
- * slot个数
+ * slot count
*/
private int slot;
/**
- *Yarn application的名字
+ *Yarn application name
*/
private String appName;
/**
- * taskManager 数量
+ * taskManager count
*/
private int taskManager;
/**
- * jobManagerMemory 内存大小
+ * job manager memory
*/
private String jobManagerMemory ;
/**
- * taskManagerMemory内存大小
+ * task manager memory
*/
private String taskManagerMemory;
/**
* resource list
*/
- private List resourceList;
+ private List resourceList = new ArrayList<>();
/**
* The YARN queue to submit to
@@ -207,16 +207,11 @@ public class FlinkParameters extends AbstractParameters {
@Override
- public List<String> getResourceFilesList() {
- if(resourceList != null ) {
- List<String> resourceFiles = resourceList.stream()
- .map(ResourceInfo::getRes).collect(Collectors.toList());
- if(mainJar != null) {
- resourceFiles.add(mainJar.getRes());
- }
- return resourceFiles;
+ public List<ResourceInfo> getResourceFilesList() {
+ if (mainJar != null && !resourceList.contains(mainJar)) {
+ resourceList.add(mainJar);
}
- return Collections.emptyList();
+ return resourceList;
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java
index 00b01afce3..54284bd8b0 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/http/HttpParameters.java
@@ -19,6 +19,7 @@ package org.apache.dolphinscheduler.common.task.http;
import org.apache.dolphinscheduler.common.enums.HttpCheckCondition;
import org.apache.dolphinscheduler.common.enums.HttpMethod;
import org.apache.dolphinscheduler.common.process.HttpProperty;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@@ -62,7 +63,7 @@ public class HttpParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
+ public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java
index 31c9c7292f..5126e82e85 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/mr/MapreduceParameters.java
@@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.mr;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;
-import java.util.Collections;
+import java.util.ArrayList;
import java.util.List;
-import java.util.stream.Collectors;
public class MapreduceParameters extends AbstractParameters {
@@ -54,7 +54,7 @@ public class MapreduceParameters extends AbstractParameters {
/**
* resource list
*/
- private List<ResourceInfo> resourceList;
+ private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* program type
@@ -125,16 +125,12 @@ public class MapreduceParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
- if(resourceList != null ) {
- List<String> resourceFiles = resourceList.stream()
- .map(ResourceInfo::getRes).collect(Collectors.toList());
- if(mainJar != null) {
- resourceFiles.add(mainJar.getRes());
- }
- return resourceFiles;
+ public List<ResourceInfo> getResourceFilesList() {
+ if (mainJar != null && !resourceList.contains(mainJar)) {
+ resourceList.add(mainJar);
}
- return Collections.emptyList();
+
+ return resourceList;
}
@Override
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java
index 56ae65547d..2811f10380 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/procedure/ProcedureParameters.java
@@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.procedure;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@@ -74,7 +75,7 @@ public class ProcedureParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
+ public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java
index ae9cb4c7da..35dbd8ed86 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/python/PythonParameters.java
@@ -21,7 +21,6 @@ import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.List;
-import java.util.stream.Collectors;
public class PythonParameters extends AbstractParameters {
/**
@@ -56,12 +55,7 @@ public class PythonParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
- if (resourceList != null) {
- return resourceList.stream()
- .map(p -> p.getRes()).collect(Collectors.toList());
- }
-
- return null;
+ public List<ResourceInfo> getResourceFilesList() {
+ return this.resourceList;
}
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java
index 85b8acb46a..e11e59600b 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/shell/ShellParameters.java
@@ -59,12 +59,7 @@ public class ShellParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
- if (resourceList != null) {
- return resourceList.stream()
- .map(p -> p.getRes()).collect(Collectors.toList());
- }
-
- return null;
+ public List<ResourceInfo> getResourceFilesList() {
+ return resourceList;
}
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
index 74982d5af9..4e58201bf3 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/spark/SparkParameters.java
@@ -19,10 +19,10 @@ package org.apache.dolphinscheduler.common.task.spark;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;
-import java.util.Collections;
+import java.util.ArrayList;
import java.util.List;
-import java.util.stream.Collectors;
/**
* spark parameters
@@ -78,7 +78,7 @@ public class SparkParameters extends AbstractParameters {
/**
* resource list
*/
- private List<ResourceInfo> resourceList;
+ private List<ResourceInfo> resourceList = new ArrayList<>();
/**
* The YARN queue to submit to
@@ -219,18 +219,12 @@ public class SparkParameters extends AbstractParameters {
return mainJar != null && programType != null && sparkVersion != null;
}
-
@Override
- public List<String> getResourceFilesList() {
- if(resourceList !=null ) {
- List<String> resourceFilesList = resourceList.stream()
- .map(ResourceInfo::getRes).collect(Collectors.toList());
- if(mainJar != null){
- resourceFilesList.add(mainJar.getRes());
- }
- return resourceFilesList;
+ public List<ResourceInfo> getResourceFilesList() {
+ if (mainJar != null && !resourceList.contains(mainJar)) {
+ resourceList.add(mainJar);
}
- return Collections.emptyList();
+ return resourceList;
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java
index d65204a386..4604234e8f 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sql/SqlParameters.java
@@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.sql;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.commons.lang.StringUtils;
@@ -189,7 +190,7 @@ public class SqlParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
+ public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java
index fb65df6c1b..7f02f42387 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/sqoop/SqoopParameters.java
@@ -16,6 +16,7 @@
*/
package org.apache.dolphinscheduler.common.task.sqoop;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.utils.StringUtils;
@@ -111,7 +112,7 @@ public class SqoopParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
+ public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
}
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java
index c7784de8dd..46f0e8510c 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/task/subprocess/SubProcessParameters.java
@@ -15,6 +15,7 @@
* limitations under the License.
*/
package org.apache.dolphinscheduler.common.task.subprocess;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import java.util.ArrayList;
@@ -42,7 +43,7 @@ public class SubProcessParameters extends AbstractParameters {
}
@Override
- public List<String> getResourceFilesList() {
+ public List<ResourceInfo> getResourceFilesList() {
return new ArrayList<>();
}
}
\ No newline at end of file
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
index 6c42704b47..c89f3c835c 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/HadoopUtils.java
@@ -26,6 +26,7 @@ import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.IOUtils;
+import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.FileSystem;
@@ -415,6 +416,22 @@ public class HadoopUtils implements Closeable {
}
}
+ /**
+ * get hdfs directory by resource type
+ *
+ * @param tenantCode tenant code
+ * @return hdfs resource dir
+ */
+ public static String getHdfsDir(ResourceType resourceType,String tenantCode) {
+ String hdfsDir = "";
+ if (resourceType.equals(ResourceType.FILE)) {
+ hdfsDir = getHdfsResDir(tenantCode);
+ } else if (resourceType.equals(ResourceType.UDF)) {
+ hdfsDir = getHdfsUdfDir(tenantCode);
+ }
+ return hdfsDir;
+ }
+
/**
* hdfs resource dir
*
@@ -450,22 +467,42 @@ public class HadoopUtils implements Closeable {
* get absolute path and name for file on hdfs
*
* @param tenantCode tenant code
- * @param filename file name
+ * @param fileName file name
+ * @return get absolute path and name for file on hdfs
+ */
+
+ /**
+ * get hdfs file name
+ *
+ * @param resourceType resource type
+ * @param tenantCode tenant code
+ * @param fileName file name
+ * @return hdfs file name
+ */
+ public static String getHdfsFileName(ResourceType resourceType, String tenantCode, String fileName) {
+ return String.format("%s/%s", getHdfsDir(resourceType,tenantCode), fileName);
+ }
+
+ /**
+ * get absolute path and name for resource file on hdfs
+ *
+ * @param tenantCode tenant code
+ * @param fileName file name
* @return get absolute path and name for file on hdfs
*/
- public static String getHdfsFilename(String tenantCode, String filename) {
- return String.format("%s/%s", getHdfsResDir(tenantCode), filename);
+ public static String getHdfsResourceFileName(String tenantCode, String fileName) {
+ return String.format("%s/%s", getHdfsResDir(tenantCode), fileName);
}
/**
* get absolute path and name for udf file on hdfs
*
* @param tenantCode tenant code
- * @param filename file name
+ * @param fileName file name
* @return get absolute path and name for udf file on hdfs
*/
- public static String getHdfsUdfFilename(String tenantCode, String filename) {
- return String.format("%s/%s", getHdfsUdfDir(tenantCode), filename);
+ public static String getHdfsUdfFileName(String tenantCode, String fileName) {
+ return String.format("%s/%s", getHdfsUdfDir(tenantCode), fileName);
}
/**
diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java
index 7ce00e875a..cd7b4f2200 100644
--- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java
+++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/task/FlinkParametersTest.java
@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.common.task;
import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
+import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.junit.Assert;
import org.junit.Test;
@@ -28,8 +29,7 @@ public class FlinkParametersTest {
@Test
public void getResourceFilesList() {
FlinkParameters flinkParameters = new FlinkParameters();
- Assert.assertNotNull(flinkParameters.getResourceFilesList());
- Assert.assertTrue(flinkParameters.getResourceFilesList().isEmpty());
+ Assert.assertTrue(CollectionUtils.isEmpty(flinkParameters.getResourceFilesList()));
ResourceInfo mainResource = new ResourceInfo();
mainResource.setRes("testFlinkMain-1.0.0-SNAPSHOT.jar");
@@ -41,15 +41,17 @@ public class FlinkParametersTest {
resourceInfos.add(resourceInfo1);
flinkParameters.setResourceList(resourceInfos);
- Assert.assertNotNull(flinkParameters.getResourceFilesList());
- Assert.assertEquals(2, flinkParameters.getResourceFilesList().size());
+ List<ResourceInfo> resourceFilesList = flinkParameters.getResourceFilesList();
+ Assert.assertNotNull(resourceFilesList);
+ Assert.assertEquals(2, resourceFilesList.size());
ResourceInfo resourceInfo2 = new ResourceInfo();
resourceInfo2.setRes("testFlinkParameters2.jar");
resourceInfos.add(resourceInfo2);
flinkParameters.setResourceList(resourceInfos);
- Assert.assertNotNull(flinkParameters.getResourceFilesList());
- Assert.assertEquals(3, flinkParameters.getResourceFilesList().size());
+ resourceFilesList = flinkParameters.getResourceFilesList();
+ Assert.assertNotNull(resourceFilesList);
+ Assert.assertEquals(3, resourceFilesList.size());
}
}
diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java
index f59d11f3fe..e29de897ef 100644
--- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java
+++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/ProcessDefinition.java
@@ -163,6 +163,11 @@ public class ProcessDefinition {
*/
private String modifyBy;
+ /**
+ * resource ids
+ */
+ private String resourceIds;
+
public String getName() {
return name;
@@ -334,6 +339,14 @@ public class ProcessDefinition {
this.scheduleReleaseState = scheduleReleaseState;
}
+ public String getResourceIds() {
+ return resourceIds;
+ }
+
+ public void setResourceIds(String resourceIds) {
+ this.resourceIds = resourceIds;
+ }
+
public int getTimeout() {
return timeout;
}
@@ -393,6 +406,8 @@ public class ProcessDefinition {
", timeout=" + timeout +
", tenantId=" + tenantId +
", modifyBy='" + modifyBy + '\'' +
+ ", resourceIds='" + resourceIds + '\'' +
'}';
}
+
}
diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java
index 934be4ba3d..16d94914fd 100644
--- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java
+++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/entity/Resource.java
@@ -32,11 +32,26 @@ public class Resource {
@TableId(value="id", type=IdType.AUTO)
private int id;
+ /**
+ * parent id
+ */
+ private int pid;
+
/**
* resource alias
*/
private String alias;
+ /**
+ * full name
+ */
+ private String fullName;
+
+ /**
+ * is directory
+ */
+ private boolean isDirectory=false;
+
/**
* description
*/
@@ -89,7 +104,15 @@ public class Resource {
this.updateTime = updateTime;
}
- public Resource(String alias, String fileName, String description, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
+ public Resource(int id, int pid, String alias, String fullName, boolean isDirectory) {
+ this.id = id;
+ this.pid = pid;
+ this.alias = alias;
+ this.fullName = fullName;
+ this.isDirectory = isDirectory;
+ }
+
+ /*public Resource(String alias, String fileName, String description, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
this.alias = alias;
this.fileName = fileName;
this.description = description;
@@ -98,6 +121,20 @@ public class Resource {
this.size = size;
this.createTime = createTime;
this.updateTime = updateTime;
+ }*/
+
+ public Resource(int pid, String alias, String fullName, boolean isDirectory, String description, String fileName, int userId, ResourceType type, long size, Date createTime, Date updateTime) {
+ this.pid = pid;
+ this.alias = alias;
+ this.fullName = fullName;
+ this.isDirectory = isDirectory;
+ this.description = description;
+ this.fileName = fileName;
+ this.userId = userId;
+ this.type = type;
+ this.size = size;
+ this.createTime = createTime;
+ this.updateTime = updateTime;
}
public int getId() {
@@ -116,6 +153,30 @@ public class Resource {
this.alias = alias;
}
+ public int getPid() {
+ return pid;
+ }
+
+ public void setPid(int pid) {
+ this.pid = pid;
+ }
+
+ public String getFullName() {
+ return fullName;
+ }
+
+ public void setFullName(String fullName) {
+ this.fullName = fullName;
+ }
+
+ public boolean isDirectory() {
+ return isDirectory;
+ }
+
+ public void setDirectory(boolean directory) {
+ isDirectory = directory;
+ }
+
public String getFileName() {
return fileName;
}
@@ -177,9 +238,12 @@ public class Resource {
public String toString() {
return "Resource{" +
"id=" + id +
+ ", pid=" + pid +
", alias='" + alias + '\'' +
- ", fileName='" + fileName + '\'' +
+ ", fullName='" + fullName + '\'' +
+ ", isDirectory=" + isDirectory +
", description='" + description + '\'' +
+ ", fileName='" + fileName + '\'' +
", userId=" + userId +
", type=" + type +
", size=" + size +
diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java
index 9f9225cb04..b75bb58b7d 100644
--- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java
+++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.java
@@ -20,9 +20,11 @@ import org.apache.dolphinscheduler.dao.entity.DefinitionGroupByUser;
import org.apache.dolphinscheduler.dao.entity.ProcessDefinition;
import com.baomidou.mybatisplus.core.mapper.BaseMapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
+import org.apache.ibatis.annotations.MapKey;
import org.apache.ibatis.annotations.Param;
import java.util.List;
+import java.util.Map;
/**
* process definition mapper interface
@@ -83,7 +85,7 @@ public interface ProcessDefinitionMapper extends BaseMapper {
List queryDefinitionListByTenant(@Param("tenantId") int tenantId);
/**
- * count process definition group by user
+ * count process definition group by user
* @param userId userId
* @param projectIds projectIds
* @param isAdmin isAdmin
@@ -93,4 +95,11 @@ public interface ProcessDefinitionMapper extends BaseMapper {
@Param("userId") Integer userId,
@Param("projectIds") Integer[] projectIds,
@Param("isAdmin") boolean isAdmin);
+
+ /**
+ * list all resource ids
+ * @return resource ids list
+ */
+ @MapKey("id")
+ List<Map<String, Object>> listResources();
}
diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java
index cf65e5d08a..f07a92c0a2 100644
--- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java
+++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.java
@@ -30,12 +30,12 @@ public interface ResourceMapper extends BaseMapper {
/**
* query resource list
- * @param alias alias
+ * @param fullName full name
* @param userId userId
* @param type type
* @return resource list
*/
- List<Resource> queryResourceList(@Param("alias") String alias,
+ List<Resource> queryResourceList(@Param("fullName") String fullName,
@Param("userId") int userId,
@Param("type") int type);
@@ -59,6 +59,7 @@ public interface ResourceMapper extends BaseMapper {
*/
IPage<Resource> queryResourcePaging(IPage<Resource> page,
@Param("userId") int userId,
+ @Param("id") int id,
@Param("type") int type,
@Param("searchVal") String searchVal);
@@ -76,13 +77,13 @@ public interface ResourceMapper extends BaseMapper {
*/
List queryResourceExceptUserId(@Param("userId") int userId);
-
/**
* query tenant code by name
* @param resName resource name
+ * @param resType resource type
* @return tenant code
*/
- String queryTenantCodeByResourceName(@Param("resName") String resName);
+ String queryTenantCodeByResourceName(@Param("resName") String resName,@Param("resType") int resType);
/**
* list authorized resource
@@ -91,4 +92,48 @@ public interface ResourceMapper extends BaseMapper {
* @return resource list
*/
List listAuthorizedResource(@Param("userId") int userId,@Param("resNames")T[] resNames);
+
+ /**
+ * list authorized resource
+ * @param userId userId
+ * @param resIds resource ids
+ * @return resource list
+ */
+ <T> List<Resource> listAuthorizedResourceById(@Param("userId") int userId,@Param("resIds")T[] resIds);
+
+ /**
+ * delete resource by id array
+ * @param resIds resource id array
+ * @return delete num
+ */
+ int deleteIds(@Param("resIds")Integer[] resIds);
+
+ /**
+ * list children
+ * @param direcotyId directory id
+ * @return resource id array
+ */
+ List<Integer> listChildren(@Param("direcotyId") int direcotyId);
+
+ /**
+ * query resource by full name or pid
+ * @param fullName full name
+ * @param type resource type
+ * @return resource
+ */
+ List<Resource> queryResource(@Param("fullName") String fullName,@Param("type") int type);
+
+ /**
+ * list resource by id array
+ * @param resIds resource id array
+ * @return resource list
+ */
+ List<Resource> listResourceByIds(@Param("resIds")Integer[] resIds);
+
+ /**
+ * update resource
+ * @param resourceList resource list
+ * @return update num
+ */
+ int batchUpdateResource(@Param("resourceList") List<Resource> resourceList);
}
diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java
index 5a8734233c..2411c9b178 100644
--- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java
+++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.java
@@ -86,4 +86,19 @@ public interface UdfFuncMapper extends BaseMapper {
*/
List listAuthorizedUdfFunc (@Param("userId") int userId,@Param("udfIds")T[] udfIds);
+ /**
+ * list UDF by resource id
+ * @param resourceIds resource id array
+ * @return UDF function list
+ */
+ List<UdfFunc> listUdfByResourceId(@Param("resourceIds") int[] resourceIds);
+
+ /**
+ * list authorized UDF by resource id
+ * @param resourceIds resource id array
+ * @return UDF function list
+ */
+ List<UdfFunc> listAuthorizedUdfByResourceId(@Param("userId") int userId,@Param("resourceIds") int[] resourceIds);
+
+
}
diff --git a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
index f2157783e8..c9086b9f83 100644
--- a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
+++ b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
@@ -87,4 +87,11 @@
pd.user_id = u.id AND pd.project_id = p.id
AND pd.id = #{processDefineId}
+
+
+
+ SELECT id,resource_ids
+ FROM t_ds_process_definition
+ WHERE release_state = 1 and resource_ids is not null and resource_ids != ''
+
\ No newline at end of file
diff --git a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml
index 2146d1ac20..c1fe50fd47 100644
--- a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml
+++ b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ResourceMapper.xml
@@ -22,8 +22,8 @@
select *
from t_ds_resources
where 1= 1
-
- and alias = #{alias}
+
+ and full_name = #{fullName}
and type = #{type}
@@ -47,8 +47,8 @@
select *
from t_ds_resources
- where type=#{type}
-
+ where type=#{type} and pid=#{id}
+
and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
union select id as resources_id from t_ds_resources where user_id=#{userId})
@@ -70,7 +70,74 @@
select tenant_code
from t_ds_tenant t, t_ds_user u, t_ds_resources res
- where t.id = u.tenant_id and u.id = res.user_id and res.type=0
- and res.alias= #{resName}
+ where t.id = u.tenant_id and u.id = res.user_id and res.type=#{resType}
+ and res.full_name= #{resName}
+
+
+ select *
+ from t_ds_resources
+ where type=0
+ and id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
+ union select id as resources_id from t_ds_resources where user_id=#{userId})
+
+ and full_name in
+
+ #{i}
+
+
+
+
+ select *
+ from t_ds_resources
+ where id in (select resources_id from t_ds_relation_resources_user where user_id=#{userId}
+ union select id as resources_id from t_ds_resources where user_id=#{userId})
+
+ and id in
+
+ #{i}
+
+
+
+
+
+ delete from t_ds_resources where id in
+
+ #{i}
+
+
+
+
+ select id
+ from t_ds_resources
+ where pid = #{direcotyId}
+
+
+
+ select *
+ from t_ds_resources
+ where type = #{type}
+ and full_name = #{fullName}
+
+
+
+
+ update t_ds_resources
+
+ full_name=#{resource.fullName},
+ update_time=#{resource.updateTime}
+
+
+ id=#{resource.id}
+
+
+
+
+
+ select *
+ from t_ds_resources
+ where id in
+
+ #{i}
+
diff --git a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
index 0aa10607c4..e38d1637d6 100644
--- a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
+++ b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/UdfFuncMapper.xml
@@ -87,4 +87,28 @@
+
+ select *
+ from t_ds_udfs
+ where 1=1
+
+ and resource_id in
+
+ #{i}
+
+
+
+
+ select *
+ from t_ds_udfs
+ where
+ id in (select udf_id from t_ds_relation_udfs_user where user_id=#{userId}
+ union select id as udf_id from t_ds_udfs where user_id=#{userId})
+
+ and resource_id in
+
+ #{i}
+
+
+
\ No newline at end of file
diff --git a/dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java b/dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java
index 01082414a9..6a2aea5ad2 100644
--- a/dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java
+++ b/dolphinscheduler-dao/src/test/java/org/apache/dolphinscheduler/dao/mapper/ResourceMapperTest.java
@@ -34,6 +34,7 @@ import org.springframework.test.annotation.Rollback;
import org.springframework.test.context.junit4.SpringRunner;
import org.springframework.transaction.annotation.Transactional;
+import java.util.ArrayList;
import java.util.Arrays;
import java.util.Date;
import java.util.List;
@@ -68,7 +69,10 @@ public class ResourceMapperTest {
private Resource insertOne(){
//insertOne
Resource resource = new Resource();
- resource.setAlias("ut resource");
+ resource.setAlias("ut-resource");
+ resource.setFullName("/ut-resource");
+ resource.setPid(-1);
+ resource.setDirectory(false);
resource.setType(ResourceType.FILE);
resource.setUserId(111);
resourceMapper.insert(resource);
@@ -80,16 +84,32 @@ public class ResourceMapperTest {
* @param user user
* @return Resource
*/
- private Resource createResource(User user){
+ private Resource createResource(User user,boolean isDirectory,ResourceType resourceType,int pid,String alias,String fullName){
//insertOne
Resource resource = new Resource();
- resource.setAlias(String.format("ut resource %s",user.getUserName()));
- resource.setType(ResourceType.FILE);
+ resource.setDirectory(isDirectory);
+ resource.setType(resourceType);
+ resource.setAlias(alias);
+ resource.setFullName(fullName);
resource.setUserId(user.getId());
resourceMapper.insert(resource);
return resource;
}
+ /**
+ * create resource by user
+ * @param user user
+ * @return Resource
+ */
+ private Resource createResource(User user){
+ //insertOne
+ String alias = String.format("ut-resource-%s",user.getUserName());
+ String fullName = String.format("/%s",alias);
+
+ Resource resource = createResource(user, false, ResourceType.FILE, -1, alias, fullName);
+ return resource;
+ }
+
/**
* create user
* @return User
@@ -200,13 +220,15 @@ public class ResourceMapperTest {
IPage resourceIPage = resourceMapper.queryResourcePaging(
page,
- resource.getUserId(),
+ 0,
+ -1,
resource.getType().ordinal(),
""
);
IPage resourceIPage1 = resourceMapper.queryResourcePaging(
page,
1110,
+ -1,
resource.getType().ordinal(),
""
);
@@ -289,7 +311,7 @@ public class ResourceMapperTest {
resourceMapper.updateById(resource);
String resource1 = resourceMapper.queryTenantCodeByResourceName(
- resource.getAlias()
+ resource.getFullName(),ResourceType.FILE.ordinal()
);
@@ -305,22 +327,37 @@ public class ResourceMapperTest {
User generalUser2 = createGeneralUser("user2");
// create one resource
Resource resource = createResource(generalUser2);
- Resource unauthorizedResource = createResource(generalUser2);
+ Resource unauthorizedResource = createResource(generalUser1);
// need download resources
- String[] resNames = new String[]{resource.getAlias(), unauthorizedResource.getAlias()};
+ String[] resNames = new String[]{resource.getFullName(), unauthorizedResource.getFullName()};
List resources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
Assert.assertEquals(generalUser2.getId(),resource.getUserId());
- Assert.assertFalse(resources.stream().map(t -> t.getAlias()).collect(toList()).containsAll(Arrays.asList(resNames)));
+ Assert.assertFalse(resources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
// authorize object unauthorizedResource to generalUser
createResourcesUser(unauthorizedResource,generalUser2);
List authorizedResources = resourceMapper.listAuthorizedResource(generalUser2.getId(), resNames);
- Assert.assertTrue(authorizedResources.stream().map(t -> t.getAlias()).collect(toList()).containsAll(Arrays.asList(resNames)));
+ Assert.assertTrue(authorizedResources.stream().map(t -> t.getFullName()).collect(toList()).containsAll(Arrays.asList(resNames)));
+
+ }
+
+ @Test
+ public void deleteIdsTest(){
+ // create a general user
+ User generalUser1 = createGeneralUser("user1");
+
+ Resource resource = createResource(generalUser1);
+ Resource resource1 = createResource(generalUser1);
+ List resourceList = new ArrayList<>();
+ resourceList.add(resource.getId());
+ resourceList.add(resource1.getId());
+ int result = resourceMapper.deleteIds(resourceList.toArray(new Integer[resourceList.size()]));
+ Assert.assertEquals(result,2);
}
}
\ No newline at end of file
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java
index 48048e7eba..c7806f1b66 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/runner/TaskScheduleThread.java
@@ -23,15 +23,18 @@ import com.alibaba.fastjson.JSON;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.ExecutionStatus;
+import org.apache.dolphinscheduler.common.enums.ResourceType;
import org.apache.dolphinscheduler.common.enums.TaskType;
import org.apache.dolphinscheduler.common.model.TaskNode;
import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.TaskTimeoutParameter;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.HadoopUtils;
import org.apache.dolphinscheduler.common.utils.TaskParametersUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
+import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.TaskInstance;
import org.apache.dolphinscheduler.common.utils.LoggerUtils;
import org.apache.dolphinscheduler.common.log.TaskLogDiscriminator;
@@ -96,7 +99,7 @@ public class TaskScheduleThread implements Runnable {
TaskNode taskNode = JSON.parseObject(taskInstance.getTaskJson(), TaskNode.class);
// get resource files
- List resourceFiles = createProjectResFiles(taskNode);
+ List resourceFiles = createProjectResFiles(taskNode);
// copy hdfs/minio file to local
downloadResource(
taskInstance.getExecutePath(),
@@ -165,6 +168,7 @@ public class TaskScheduleThread implements Runnable {
new Date(),
taskInstance.getId());
}
+
/**
* get global paras map
* @return
@@ -289,14 +293,16 @@ public class TaskScheduleThread implements Runnable {
/**
* create project resource files
*/
- private List createProjectResFiles(TaskNode taskNode) throws Exception{
+ private List createProjectResFiles(TaskNode taskNode) throws Exception{
- Set projectFiles = new HashSet<>();
+ Set projectFiles = new HashSet<>();
AbstractParameters baseParam = TaskParametersUtils.getParameters(taskNode.getType(), taskNode.getParams());
if (baseParam != null) {
- List projectResourceFiles = baseParam.getResourceFilesList();
- projectFiles.addAll(projectResourceFiles);
+ List projectResourceFiles = baseParam.getResourceFilesList();
+ if (projectResourceFiles != null) {
+ projectFiles.addAll(projectResourceFiles);
+ }
}
return new ArrayList<>(projectFiles);
@@ -309,18 +315,25 @@ public class TaskScheduleThread implements Runnable {
* @param projectRes
* @param logger
*/
- private void downloadResource(String execLocalPath, List projectRes, Logger logger) throws Exception {
+ private void downloadResource(String execLocalPath, List projectRes, Logger logger) throws Exception {
checkDownloadPermission(projectRes);
- for (String res : projectRes) {
- File resFile = new File(execLocalPath, res);
+ String resourceName;
+ for (ResourceInfo res : projectRes) {
+ if (res.getId() != 0) {
+ Resource resource = processService.getResourceById(res.getId());
+ resourceName = resource.getFullName();
+ }else{
+ resourceName = res.getRes();
+ }
+ File resFile = new File(execLocalPath, resourceName);
if (!resFile.exists()) {
try {
// query the tenant code of the resource according to the name of the resource
- String tentnCode = processService.queryTenantCodeByResName(res);
- String resHdfsPath = HadoopUtils.getHdfsFilename(tentnCode, res);
+ String tentnCode = processService.queryTenantCodeByResName(resourceName, ResourceType.FILE);
+ String resHdfsPath = HadoopUtils.getHdfsResourceFileName(tentnCode, resourceName);
logger.info("get resource file from hdfs :{}", resHdfsPath);
- HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + res, false, true);
+ HadoopUtils.getInstance().copyHdfsToLocal(resHdfsPath, execLocalPath + File.separator + resourceName, false, true);
}catch (Exception e){
logger.error(e.getMessage(),e);
throw new RuntimeException(e.getMessage());
@@ -336,10 +349,17 @@ public class TaskScheduleThread implements Runnable {
* @param projectRes resource name list
* @throws Exception exception
*/
- private void checkDownloadPermission(List projectRes) throws Exception {
+ private void checkDownloadPermission(List projectRes) throws Exception {
+
int userId = taskInstance.getProcessInstance().getExecutorId();
- String[] resNames = projectRes.toArray(new String[projectRes.size()]);
- PermissionCheck permissionCheck = new PermissionCheck<>(AuthorizationType.RESOURCE_FILE, processService,resNames,userId,logger);
- permissionCheck.checkPermission();
+ if (projectRes.stream().allMatch(t->t.getId() == 0)) {
+ String[] resNames = projectRes.stream().map(t -> t.getRes()).collect(Collectors.toList()).toArray(new String[projectRes.size()]);
+ PermissionCheck permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_NAME,processService,resNames,userId,logger);
+ permissionCheck.checkPermission();
+ }else{
+ Integer[] resIds = projectRes.stream().map(t -> t.getId()).collect(Collectors.toList()).toArray(new Integer[projectRes.size()]);
+ PermissionCheck permissionCheck = new PermissionCheck(AuthorizationType.RESOURCE_FILE_ID,processService,resIds,userId,logger);
+ permissionCheck.checkPermission();
+ }
}
}
\ No newline at end of file
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java
index 39f4dfbb97..cda12ca525 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/AbstractYarnTask.java
@@ -94,4 +94,9 @@ public abstract class AbstractYarnTask extends AbstractTask {
* @throws Exception exception
*/
protected abstract String buildCommand() throws Exception;
+
+ /**
+ * set main jar name
+ */
+ protected abstract void setMainJarName();
}
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
index c562fbe4dd..0dc7c6a638 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/flink/FlinkTask.java
@@ -17,12 +17,14 @@
package org.apache.dolphinscheduler.server.worker.task.flink;
import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.flink.FlinkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
import org.apache.dolphinscheduler.dao.entity.ProcessInstance;
+import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.FlinkArgsUtils;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
@@ -63,6 +65,7 @@ public class FlinkTask extends AbstractYarnTask {
if (!flinkParameters.checkParameters()) {
throw new RuntimeException("flink task params is not valid");
}
+ setMainJarName();
flinkParameters.setQueue(taskProps.getQueue());
if (StringUtils.isNotEmpty(flinkParameters.getMainArgs())) {
@@ -111,6 +114,28 @@ public class FlinkTask extends AbstractYarnTask {
return command;
}
+ @Override
+ protected void setMainJarName() {
+ // main jar
+ ResourceInfo mainJar = flinkParameters.getMainJar();
+ if (mainJar != null) {
+ int resourceId = mainJar.getId();
+ String resourceName;
+ if (resourceId == 0) {
+ resourceName = mainJar.getRes();
+ } else {
+ Resource resource = processService.getResourceById(flinkParameters.getMainJar().getId());
+ if (resource == null) {
+ logger.error("resource id: {} not exist", resourceId);
+ throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
+ }
+ resourceName = resource.getFullName().replaceFirst("/", "");
+ }
+ mainJar.setRes(resourceName);
+ flinkParameters.setMainJar(mainJar);
+ }
+ }
+
@Override
public AbstractParameters getParameters() {
return flinkParameters;
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java
index 7f6baad427..0909fbd06e 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/mr/MapReduceTask.java
@@ -19,11 +19,13 @@ package org.apache.dolphinscheduler.server.worker.task.mr;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.ProgramType;
import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.mr.MapreduceParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
import org.apache.dolphinscheduler.server.worker.task.TaskProps;
@@ -64,7 +66,7 @@ public class MapReduceTask extends AbstractYarnTask {
if (!mapreduceParameters.checkParameters()) {
throw new RuntimeException("mapreduce task params is not valid");
}
-
+ setMainJarName();
mapreduceParameters.setQueue(taskProps.getQueue());
// replace placeholder
@@ -99,6 +101,28 @@ public class MapReduceTask extends AbstractYarnTask {
return command;
}
+ @Override
+ protected void setMainJarName() {
+ // main jar
+ ResourceInfo mainJar = mapreduceParameters.getMainJar();
+ if (mainJar != null) {
+ int resourceId = mainJar.getId();
+ String resourceName;
+ if (resourceId == 0) {
+ resourceName = mainJar.getRes();
+ } else {
+ Resource resource = processService.getResourceById(mapreduceParameters.getMainJar().getId());
+ if (resource == null) {
+ logger.error("resource id: {} not exist", resourceId);
+ throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
+ }
+ resourceName = resource.getFullName().replaceFirst("/", "");
+ }
+ mainJar.setRes(resourceName);
+ mapreduceParameters.setMainJar(mainJar);
+ }
+ }
+
@Override
public AbstractParameters getParameters() {
return mapreduceParameters;
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java
index 203c0fe146..d2a8674146 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/spark/SparkTask.java
@@ -18,11 +18,13 @@ package org.apache.dolphinscheduler.server.worker.task.spark;
import org.apache.dolphinscheduler.common.enums.SparkVersion;
import org.apache.dolphinscheduler.common.process.Property;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.task.AbstractParameters;
import org.apache.dolphinscheduler.common.task.spark.SparkParameters;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.common.utils.StringUtils;
+import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.server.utils.ParamUtils;
import org.apache.dolphinscheduler.server.utils.SparkArgsUtils;
import org.apache.dolphinscheduler.server.worker.task.AbstractYarnTask;
@@ -67,8 +69,8 @@ public class SparkTask extends AbstractYarnTask {
if (!sparkParameters.checkParameters()) {
throw new RuntimeException("spark task params is not valid");
}
+ setMainJarName();
sparkParameters.setQueue(taskProps.getQueue());
-
if (StringUtils.isNotEmpty(sparkParameters.getMainArgs())) {
String args = sparkParameters.getMainArgs();
@@ -115,6 +117,28 @@ public class SparkTask extends AbstractYarnTask {
return command;
}
+ @Override
+ protected void setMainJarName() {
+ // main jar
+ ResourceInfo mainJar = sparkParameters.getMainJar();
+ if (mainJar != null) {
+ int resourceId = mainJar.getId();
+ String resourceName;
+ if (resourceId == 0) {
+ resourceName = mainJar.getRes();
+ } else {
+ Resource resource = processService.getResourceById(sparkParameters.getMainJar().getId());
+ if (resource == null) {
+ logger.error("resource id: {} not exist", resourceId);
+ throw new RuntimeException(String.format("resource id: %d not exist", resourceId));
+ }
+ resourceName = resource.getFullName().replaceFirst("/", "");
+ }
+ mainJar.setRes(resourceName);
+ sparkParameters.setMainJar(mainJar);
+ }
+ }
+
@Override
public AbstractParameters getParameters() {
return sparkParameters;
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java
index 64bc7924d2..5c701dcd52 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sqoop/SqoopTask.java
@@ -71,6 +71,10 @@ public class SqoopTask extends AbstractYarnTask {
return null;
}
+ @Override
+ protected void setMainJarName() {
+ }
+
@Override
public AbstractParameters getParameters() {
return sqoopParameters;
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java
index e53fae6e86..9f93f4ce3e 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/permission/PermissionCheck.java
@@ -18,6 +18,7 @@ package org.apache.dolphinscheduler.service.permission;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
import org.apache.dolphinscheduler.common.enums.UserType;
+import org.apache.dolphinscheduler.common.process.ResourceInfo;
import org.apache.dolphinscheduler.common.utils.CollectionUtils;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.service.process.ProcessService;
@@ -45,6 +46,11 @@ public class PermissionCheck {
*/
private T[] needChecks;
+ /**
+ * resoruce info
+ */
+ private List resourceList;
+
/**
* user id
*/
@@ -90,6 +96,22 @@ public class PermissionCheck {
this.logger = logger;
}
+ /**
+ * permission check
+ * @param logger
+ * @param authorizationType
+ * @param processService
+ * @param resourceList
+ * @param userId
+ */
+ public PermissionCheck(AuthorizationType authorizationType, ProcessService processService, List resourceList, int userId,Logger logger) {
+ this.authorizationType = authorizationType;
+ this.processService = processService;
+ this.resourceList = resourceList;
+ this.userId = userId;
+ this.logger = logger;
+ }
+
public AuthorizationType getAuthorizationType() {
return authorizationType;
}
@@ -122,6 +144,14 @@ public class PermissionCheck {
this.userId = userId;
}
+ public List getResourceList() {
+ return resourceList;
+ }
+
+ public void setResourceList(List resourceList) {
+ this.resourceList = resourceList;
+ }
+
/**
* has permission
* @return true if has permission
@@ -141,6 +171,7 @@ public class PermissionCheck {
*/
public void checkPermission() throws Exception{
if(this.needChecks.length > 0){
+
// get user type in order to judge whether the user is admin
User user = processService.getUserById(userId);
if (user.getUserType() != UserType.ADMIN_USER){
diff --git a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
index c848ec5197..3312c1004a 100644
--- a/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
+++ b/dolphinscheduler-service/src/main/java/org/apache/dolphinscheduler/service/process/ProcessService.java
@@ -1556,10 +1556,11 @@ public class ProcessService {
/**
* find tenant code by resource name
* @param resName resource name
+ * @param resourceType resource type
* @return tenant code
*/
- public String queryTenantCodeByResName(String resName){
- return resourceMapper.queryTenantCodeByResourceName(resName);
+ public String queryTenantCodeByResName(String resName,ResourceType resourceType){
+ return resourceMapper.queryTenantCodeByResourceName(resName,resourceType.ordinal());
}
/**
@@ -1791,10 +1792,18 @@ public class ProcessService {
Set originResSet = new HashSet(Arrays.asList(needChecks));
switch (authorizationType){
- case RESOURCE_FILE:
- Set authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getAlias()).collect(toSet());
+ case RESOURCE_FILE_ID:
+ Set authorizedResourceFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
+ originResSet.removeAll(authorizedResourceFiles);
+ break;
+ case RESOURCE_FILE_NAME:
+ Set authorizedResources = resourceMapper.listAuthorizedResource(userId, needChecks).stream().map(t -> t.getFullName()).collect(toSet());
originResSet.removeAll(authorizedResources);
break;
+ case UDF_FILE:
+ Set authorizedUdfFiles = resourceMapper.listAuthorizedResourceById(userId, needChecks).stream().map(t -> t.getId()).collect(toSet());
+ originResSet.removeAll(authorizedUdfFiles);
+ break;
case DATASOURCE:
Set authorizedDatasources = dataSourceMapper.listAuthorizedDataSource(userId,needChecks).stream().map(t -> t.getId()).collect(toSet());
originResSet.removeAll(authorizedDatasources);
@@ -1820,5 +1829,14 @@ public class ProcessService {
return userMapper.queryDetailsById(userId);
}
+ /**
+ * get resource by resoruce id
+ * @param resoruceId resource id
+ * @return Resource
+ */
+ public Resource getResourceById(int resoruceId){
+ return resourceMapper.selectById(resoruceId);
+ }
+
}
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js
index 9412a8cf38..6a17239e65 100755
--- a/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/dag/_source/plugIn/jsPlumbHandle.js
@@ -724,7 +724,7 @@ JSP.prototype.handleEvent = function () {
} else {
$(`#${sourceId}`).attr('data-nodenumber',Number($(`#${sourceId}`).attr('data-nodenumber'))+1)
}
-
+
// Storage node dependency information
saveTargetarr(sourceId, targetId)
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/_source/common.js b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/_source/common.js
old mode 100644
new mode 100755
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue
old mode 100644
new mode 100755
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
index b082f883fb..ad33503532 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
@@ -116,4 +116,4 @@
},
components: { mPopup, mListBoxF }
}
-
\ No newline at end of file
+
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/index.vue
old mode 100644
new mode 100755
diff --git a/dolphinscheduler-ui/src/js/conf/home/store/resource/actions.js b/dolphinscheduler-ui/src/js/conf/home/store/resource/actions.js
old mode 100644
new mode 100755
diff --git a/dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue b/dolphinscheduler-ui/src/js/module/components/fileUpdate/fileUpdate.vue
old mode 100644
new mode 100755
diff --git a/sql/soft_version b/sql/soft_version
index 867e52437a..d2d61a7e8e 100644
--- a/sql/soft_version
+++ b/sql/soft_version
@@ -1 +1 @@
-1.2.0
\ No newline at end of file
+1.2.2
\ No newline at end of file
diff --git a/sql/upgrade/1.2.2_schema/mysql/dolphinscheduler_ddl.sql b/sql/upgrade/1.2.2_schema/mysql/dolphinscheduler_ddl.sql
index 049484ce3a..f960d5ce49 100644
--- a/sql/upgrade/1.2.2_schema/mysql/dolphinscheduler_ddl.sql
+++ b/sql/upgrade/1.2.2_schema/mysql/dolphinscheduler_ddl.sql
@@ -74,4 +74,84 @@ d//
delimiter ;
CALL uc_dolphin_T_t_ds_task_instance_C_app_link;
-DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_C_app_link;
\ No newline at end of file
+DROP PROCEDURE uc_dolphin_T_t_ds_task_instance_C_app_link;
+
+-- ac_dolphin_T_t_ds_resources_A_pid
+drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_resources_A_pid;
+delimiter d//
+CREATE PROCEDURE ac_dolphin_T_t_ds_resources_A_pid()
+ BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_NAME='t_ds_resources'
+ AND TABLE_SCHEMA=(SELECT DATABASE())
+ AND COLUMN_NAME ='pid')
+ THEN
+ ALTER TABLE t_ds_resources ADD `pid` int(11) DEFAULT -1 COMMENT 'parent id';
+ END IF;
+ END;
+
+d//
+
+delimiter ;
+CALL ac_dolphin_T_t_ds_resources_A_pid;
+DROP PROCEDURE ac_dolphin_T_t_ds_resources_A_pid;
+
+-- ac_dolphin_T_t_ds_resources_A_full_name
+drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_resources_A_full_name;
+delimiter d//
+CREATE PROCEDURE ac_dolphin_T_t_ds_resources_A_full_name()
+ BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_NAME='t_ds_resources'
+ AND TABLE_SCHEMA=(SELECT DATABASE())
+ AND COLUMN_NAME ='full_name')
+ THEN
+ ALTER TABLE t_ds_resources ADD `full_name` varchar(255) DEFAULT NULL COMMENT 'full name';
+ END IF;
+ END;
+
+d//
+
+delimiter ;
+CALL ac_dolphin_T_t_ds_resources_A_full_name;
+DROP PROCEDURE ac_dolphin_T_t_ds_resources_A_full_name;
+
+-- ac_dolphin_T_t_ds_resources_A_pid
+drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_resources_is_directory;
+delimiter d//
+CREATE PROCEDURE ac_dolphin_T_t_ds_resources_is_directory()
+ BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_NAME='t_ds_resources'
+ AND TABLE_SCHEMA=(SELECT DATABASE())
+ AND COLUMN_NAME ='is_directory')
+ THEN
+ ALTER TABLE t_ds_resources ADD `is_directory` tinyint(1) DEFAULT 0 COMMENT 'is directory';
+ END IF;
+ END;
+
+d//
+
+delimiter ;
+CALL ac_dolphin_T_t_ds_resources_is_directory;
+DROP PROCEDURE ac_dolphin_T_t_ds_resources_is_directory;
+
+-- ac_dolphin_T_t_ds_process_definition_A_resource_ids
+drop PROCEDURE if EXISTS ac_dolphin_T_t_ds_process_definition_A_resource_ids;
+delimiter d//
+CREATE PROCEDURE ac_dolphin_T_t_ds_process_definition_A_resource_ids()
+ BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_NAME='t_ds_process_definition'
+ AND TABLE_SCHEMA=(SELECT DATABASE())
+ AND COLUMN_NAME ='resource_ids')
+ THEN
+ ALTER TABLE t_ds_process_definition ADD `resource_ids` varchar(255) DEFAULT NULL COMMENT 'resource ids';
+ END IF;
+ END;
+
+d//
+
+delimiter ;
+CALL ac_dolphin_T_t_ds_process_definition_A_resource_ids;
+DROP PROCEDURE ac_dolphin_T_t_ds_process_definition_A_resource_ids;
\ No newline at end of file
diff --git a/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_ddl.sql b/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_ddl.sql
index b1e0fd941c..9b5f15b8ae 100644
--- a/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_ddl.sql
+++ b/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_ddl.sql
@@ -66,4 +66,87 @@ d//
delimiter ;
SELECT uc_dolphin_T_t_ds_task_instance_C_app_link();
-DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_C_app_link();
\ No newline at end of file
+DROP FUNCTION IF EXISTS uc_dolphin_T_t_ds_task_instance_C_app_link();
+
+
+-- ac_dolphin_T_t_ds_resources_A_pid
+delimiter d//
+CREATE FUNCTION ac_dolphin_T_t_ds_resources_A_pid() RETURNS void AS $$
+BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_CATALOG=current_database()
+ AND TABLE_SCHEMA=current_schema()
+ AND TABLE_NAME='t_ds_resources'
+ AND COLUMN_NAME ='pid')
+ THEN
+ ALTER TABLE t_ds_resources ADD COLUMN pid int DEFAULT -1;
+ END IF;
+END;
+$$ LANGUAGE plpgsql;
+d//
+delimiter ;
+select ac_dolphin_T_t_ds_resources_A_pid();
+DROP FUNCTION ac_dolphin_T_t_ds_resources_A_pid();
+
+-- ac_dolphin_T_t_ds_resources_A_full_name
+delimiter ;
+DROP FUNCTION IF EXISTS ac_dolphin_T_t_ds_resources_A_full_name();
+delimiter d//
+CREATE FUNCTION ac_dolphin_T_t_ds_resources_A_full_name() RETURNS void AS $$
+BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_CATALOG=current_database()
+ AND TABLE_SCHEMA=current_schema()
+ AND TABLE_NAME='t_ds_resources'
+ AND COLUMN_NAME ='full_name')
+ THEN
+ ALTER TABLE t_ds_resources ADD COLUMN full_name varchar(255) DEFAULT null;
+ END IF;
+END;
+$$ LANGUAGE plpgsql;
+d//
+delimiter ;
+select ac_dolphin_T_t_ds_resources_A_full_name();
+DROP FUNCTION ac_dolphin_T_t_ds_resources_A_full_name();
+
+-- ac_dolphin_T_t_ds_resources_A_is_directory
+delimiter ;
+DROP FUNCTION IF EXISTS ac_dolphin_T_t_ds_resources_A_is_directory();
+delimiter d//
+CREATE FUNCTION ac_dolphin_T_t_ds_resources_A_is_directory() RETURNS void AS $$
+BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_CATALOG=current_database()
+ AND TABLE_SCHEMA=current_schema()
+ AND TABLE_NAME='t_ds_resources'
+ AND COLUMN_NAME ='is_directory')
+ THEN
+ ALTER TABLE t_ds_resources ADD COLUMN is_directory boolean DEFAULT false;
+ END IF;
+END;
+$$ LANGUAGE plpgsql;
+d//
+delimiter ;
+select ac_dolphin_T_t_ds_resources_A_is_directory();
+DROP FUNCTION ac_dolphin_T_t_ds_resources_A_is_directory();
+
+-- ac_dolphin_T_t_ds_process_definition_A_resource_ids
+delimiter ;
+DROP FUNCTION IF EXISTS ac_dolphin_T_t_ds_process_definition_A_resource_ids();
+delimiter d//
+CREATE FUNCTION ac_dolphin_T_t_ds_process_definition_A_resource_ids() RETURNS void AS $$
+BEGIN
+ IF NOT EXISTS (SELECT 1 FROM information_schema.COLUMNS
+ WHERE TABLE_CATALOG=current_database()
+ AND TABLE_SCHEMA=current_schema()
+ AND TABLE_NAME='t_ds_process_definition'
+ AND COLUMN_NAME ='resource_ids')
+ THEN
+ ALTER TABLE t_ds_process_definition ADD COLUMN resource_ids varchar(255) DEFAULT null;
+ END IF;
+END;
+$$ LANGUAGE plpgsql;
+d//
+delimiter ;
+select ac_dolphin_T_t_ds_process_definition_A_resource_ids();
+DROP FUNCTION ac_dolphin_T_t_ds_process_definition_A_resource_ids();
diff --git a/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_dml.sql b/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_dml.sql
index 38964cc551..f892fa8c91 100644
--- a/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_dml.sql
+++ b/sql/upgrade/1.2.2_schema/postgresql/dolphinscheduler_dml.sql
@@ -13,4 +13,6 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
-*/
\ No newline at end of file
+*/
+UPDATE t_ds_resources SET pid=-1,is_directory=false WHERE pid IS NULL;
+UPDATE t_ds_resources SET full_name = concat('/',alias) WHERE pid=-1 and full_name IS NULL;
\ No newline at end of file
From a851168a350e300becb3452e91871220ffa3a5fc Mon Sep 17 00:00:00 2001
From: Rubik-W <39549317+Rubik-W@users.noreply.github.com>
Date: Sat, 28 Mar 2020 15:50:14 +0800
Subject: [PATCH 35/58] =?UTF-8?q?fix=EF=BC=9AOptimize=20content=20returned?=
=?UTF-8?q?=20by=20interface=20(#2294)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
.../api/service/ProcessInstanceService.java | 8 +-------
.../dao/mapper/ProcessDefinitionMapper.xml | 4 +++-
.../dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml | 7 ++++++-
3 files changed, 10 insertions(+), 9 deletions(-)
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
index 57f839af1b..ab5580cc3e 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/ProcessInstanceService.java
@@ -204,14 +204,8 @@ public class ProcessInstanceService extends BaseDAGService {
}
}
- Set exclusionSet = new HashSet<>();
- exclusionSet.add(Constants.CLASS);
- exclusionSet.add("locations");
- exclusionSet.add("connects");
- exclusionSet.add("processInstanceJson");
-
pageInfo.setTotalCount((int) processInstanceList.getTotal());
- pageInfo.setLists(CollectionUtils.getListByExclusion(processInstances, exclusionSet));
+ pageInfo.setLists(processInstances);
result.put(Constants.DATA_LIST, pageInfo);
putMsg(result, Status.SUCCESS);
return result;
diff --git a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
index c9086b9f83..0cabf800cd 100644
--- a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
+++ b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessDefinitionMapper.xml
@@ -29,7 +29,9 @@
and pd.name = #{processDefinitionName}
- SELECT td.*,sc.schedule_release_state,tu.user_name
+ SELECT td.id, td.name, td.version, td.release_state, td.project_id, td.user_id, td.description, td.global_params,
+ td.flag, td.receivers, td.receivers_cc, td.timeout, td.tenant_id, td.modify_by, td.update_time, td.create_time,
+ sc.schedule_release_state, tu.user_name
FROM t_ds_process_definition td
left join (select process_definition_id,release_state as schedule_release_state from t_ds_schedules group by process_definition_id,release_state) sc on sc.process_definition_id = td.id
left join t_ds_user tu on td.user_id = tu.id
diff --git a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
index 3559ca9c85..e5697d1a60 100644
--- a/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
+++ b/dolphinscheduler-dao/src/main/resources/org/apache/dolphinscheduler/dao/mapper/ProcessInstanceMapper.xml
@@ -66,7 +66,12 @@
- select instance.*
+ select instance.id, instance.name, instance.process_definition_id, instance.state, instance.recovery, instance.start_time,
+ instance.end_time, instance.run_times, instance.host, instance.command_type, instance.command_param, instance.task_depend_type,
+ instance.max_try_times, instance.failure_strategy, instance.warning_type, instance.warning_group_id, instance.schedule_time,
+ instance.command_start_time, instance.global_params, instance.flag, instance.is_sub_process, instance.executor_id,
+ instance.history_cmd, instance.dependence_schedule_times, instance.process_instance_priority, instance.worker_group_id,
+ instance.timeout, instance.tenant_id, instance.update_time
from t_ds_process_instance instance
join t_ds_process_definition define ON instance.process_definition_id = define.id
where 1=1
From d4735334a1986fedd5b92d185dc6d46be93cb071 Mon Sep 17 00:00:00 2001
From: t1mon <178317391@qq.com>
Date: Sat, 28 Mar 2020 17:42:33 +0800
Subject: [PATCH 36/58] Fix SqlTask kerberos load scope unreasonable. #2178
(#2321)
* Optimize PropertyUtils instantiation.
* Fix info error.
* Fix SqlTask kerberos load scope unreasonable. #2178
---
.../dolphinscheduler/server/worker/task/sql/SqlTask.java | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
index ab314b6f8e..12f4b580e9 100644
--- a/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
+++ b/dolphinscheduler-server/src/main/java/org/apache/dolphinscheduler/server/worker/task/sql/SqlTask.java
@@ -25,6 +25,7 @@ import org.apache.commons.lang.StringUtils;
import org.apache.dolphinscheduler.alert.utils.MailUtils;
import org.apache.dolphinscheduler.common.Constants;
import org.apache.dolphinscheduler.common.enums.AuthorizationType;
+import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.enums.ShowType;
import org.apache.dolphinscheduler.common.enums.TaskTimeoutStrategy;
import org.apache.dolphinscheduler.common.enums.UdfType;
@@ -247,11 +248,12 @@ public class SqlTask extends AbstractTask {
List createFuncs){
Connection connection = null;
try {
- // if upload resource is HDFS and kerberos startup
- CommonUtils.loadKerberosConf();
// if hive , load connection params if exists
- if (HIVE == dataSource.getType()) {
+ if (DbType.HIVE == dataSource.getType() || DbType.SPARK == dataSource.getType()) {
+ // if upload resource is HDFS and kerberos startup
+ CommonUtils.loadKerberosConf();
+
Properties paramProp = new Properties();
paramProp.setProperty(USER, baseDataSource.getUser());
paramProp.setProperty(PASSWORD, baseDataSource.getPassword());
From f6ca5480ddc1f2b7a7393b86977f1b5332dcc9b1 Mon Sep 17 00:00:00 2001
From: liwenhe1993 <32166572+liwenhe1993@users.noreply.github.com>
Date: Sat, 28 Mar 2020 17:45:58 +0800
Subject: [PATCH 37/58] Support kubernetes deployment (#2153)
* Support kubernetes deployment
* Support kubernetes deployment
---
charts/README.md | 226 +++++++++++
charts/dolphinscheduler/.helmignore | 23 ++
charts/dolphinscheduler/Chart.yaml | 52 +++
charts/dolphinscheduler/README.md | 226 +++++++++++
charts/dolphinscheduler/templates/NOTES.txt | 44 +++
.../dolphinscheduler/templates/_helpers.tpl | 149 ++++++++
.../configmap-dolphinscheduler-alert.yaml | 41 ++
.../configmap-dolphinscheduler-master.yaml | 34 ++
.../configmap-dolphinscheduler-worker.yaml | 39 ++
.../deployment-dolphinscheduler-alert.yaml | 228 +++++++++++
.../deployment-dolphinscheduler-api.yaml | 161 ++++++++
.../deployment-dolphinscheduler-frontend.yaml | 102 +++++
.../dolphinscheduler/templates/ingress.yaml | 43 +++
.../templates/pvc-dolphinscheduler-alert.yaml | 35 ++
.../templates/pvc-dolphinscheduler-api.yaml | 35 ++
.../pvc-dolphinscheduler-frontend.yaml | 35 ++
.../templates/secret-external-postgresql.yaml | 29 ++
.../statefulset-dolphinscheduler-master.yaml | 247 ++++++++++++
.../statefulset-dolphinscheduler-worker.yaml | 275 ++++++++++++++
.../templates/svc-dolphinscheduler-api.yaml | 35 ++
.../svc-dolphinscheduler-frontend.yaml | 35 ++
.../svc-dolphinscheduler-master-headless.yaml | 36 ++
.../svc-dolphinscheduler-worker-headless.yaml | 36 ++
charts/dolphinscheduler/values.yaml | 355 ++++++++++++++++++
24 files changed, 2521 insertions(+)
create mode 100644 charts/README.md
create mode 100644 charts/dolphinscheduler/.helmignore
create mode 100644 charts/dolphinscheduler/Chart.yaml
create mode 100644 charts/dolphinscheduler/README.md
create mode 100644 charts/dolphinscheduler/templates/NOTES.txt
create mode 100644 charts/dolphinscheduler/templates/_helpers.tpl
create mode 100644 charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
create mode 100644 charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
create mode 100644 charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
create mode 100644 charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
create mode 100644 charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
create mode 100644 charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
create mode 100644 charts/dolphinscheduler/templates/ingress.yaml
create mode 100644 charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
create mode 100644 charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
create mode 100644 charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml
create mode 100644 charts/dolphinscheduler/templates/secret-external-postgresql.yaml
create mode 100644 charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
create mode 100644 charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
create mode 100644 charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml
create mode 100644 charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml
create mode 100644 charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml
create mode 100644 charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
create mode 100644 charts/dolphinscheduler/values.yaml
diff --git a/charts/README.md b/charts/README.md
new file mode 100644
index 0000000000..6f0317b9e2
--- /dev/null
+++ b/charts/README.md
@@ -0,0 +1,226 @@
+# Dolphin Scheduler
+
+[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
+
+## Introduction
+This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.10+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ git clone https://github.com/apache/incubator-dolphinscheduler.git
+$ cd incubator-dolphinscheduler
+$ helm install --name dolphinscheduler .
+```
+These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `dolphinscheduler` deployment:
+
+```bash
+$ helm delete --purge dolphinscheduler
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
+
+| Parameter | Description | Default |
+| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
+| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
+| `image.registry` | Docker image registry for the Dolphins Scheduler | `docker.io` |
+| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` |
+| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` |
+| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
+| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
+| | | |
+| `postgresql.enabled` | If not exists external PostgreSQL, by default, the Dolphins Scheduler will use a internal PostgreSQL | `true` |
+| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
+| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
+| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
+| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
+| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
+| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `externalDatabase.host` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database host will use it. | `localhost` |
+| `externalDatabase.port` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database port will use it. | `5432` |
+| `externalDatabase.username` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database username will use it. | `root` |
+| `externalDatabase.password` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database password will use it. | `root` |
+| `externalDatabase.database` | If exists external PostgreSQL, and set `postgresql.enable` value to false. Dolphins Scheduler's database database will use it. | `dolphinscheduler` |
+| | | |
+| `zookeeper.enabled` | If not exists external Zookeeper, by default, the Dolphin Scheduler will use a internal Zookeeper | `true` |
+| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
+| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
+| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
+| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `externalZookeeper.taskQueue` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify task queue for `master` and `worker` | `zookeeper` |
+| `externalZookeeper.zookeeperQuorum` | If exists external Zookeeper, and set `zookeeper.enable` value to false. Specify Zookeeper quorum | `127.0.0.1:2181` |
+| | | |
+| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
+| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
+| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `master.tolerations` | If specified, the pod's tolerations | `{}` |
+| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
+| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
+| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
+| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
+| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
+| `master.configmap.MASTER_MAX_CPULOAD_AVG` | Only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 | `100` |
+| `master.configmap.MASTER_RESERVED_MEMORY` | Only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G | `0.1` |
+| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `master.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `master.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `master.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `master.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
+| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
+| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
+| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
+| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
+| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
+| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
+| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | Only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 | `100` |
+| `worker.configmap.WORKER_RESERVED_MEMORY` | Only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G | `0.1` |
+| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and have read write permissions | `/tmp/dolphinscheduler` |
+| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` |
+| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `worker.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `worker.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `worker.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `worker.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
+| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
+| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
+| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
+| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
+| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
+| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` |
+| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
+| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
+| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
+| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
+| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
+| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
+| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
+| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
+| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `alert.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `alert.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `alert.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `alert.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
+| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
+| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
+| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
+| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
+| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `api.tolerations` | If specified, the pod's tolerations | `{}` |
+| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `api.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `api.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `api.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `api.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
+| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
+| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
+| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
+| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
+| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
+| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `frontend.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `frontend.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `frontend.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` |
+| `frontend.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` |
+| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
+| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `ingress.enabled` | Enable ingress | `false` |
+| `ingress.host` | Ingress host | `dolphinscheduler.org` |
+| `ingress.path` | Ingress path | `/` |
+| `ingress.tls.enabled` | Enable ingress tls | `false` |
+| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
+| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
+
+For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
diff --git a/charts/dolphinscheduler/.helmignore b/charts/dolphinscheduler/.helmignore
new file mode 100644
index 0000000000..0e8a0eb36f
--- /dev/null
+++ b/charts/dolphinscheduler/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/charts/dolphinscheduler/Chart.yaml b/charts/dolphinscheduler/Chart.yaml
new file mode 100644
index 0000000000..2c40f94d3c
--- /dev/null
+++ b/charts/dolphinscheduler/Chart.yaml
@@ -0,0 +1,52 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v2
+name: dolphinscheduler
+description: Dolphin Scheduler is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
+home: https://dolphinscheduler.apache.org
+icon: https://dolphinscheduler.apache.org/img/hlogo_colorful.svg
+keywords:
+ - dolphinscheduler
+ - Scheduler
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application.
+appVersion: 1.2.1
+
+dependencies:
+ - name: postgresql
+ version: 8.x.x
+ repository: https://charts.bitnami.com/bitnami
+ condition: postgresql.enabled
+ - name: zookeeper
+ version: 5.x.x
+ repository: https://charts.bitnami.com/bitnami
+ condition: zookeeper.enabled
diff --git a/charts/dolphinscheduler/README.md b/charts/dolphinscheduler/README.md
new file mode 100644
index 0000000000..6f0317b9e2
--- /dev/null
+++ b/charts/dolphinscheduler/README.md
@@ -0,0 +1,226 @@
+# Dolphin Scheduler
+
+[Dolphin Scheduler](https://dolphinscheduler.apache.org) is a distributed and easy-to-expand visual DAG workflow scheduling system, dedicated to solving the complex dependencies in data processing, making the scheduling system out of the box for data processing.
+
+## Introduction
+This chart bootstraps a [Dolphin Scheduler](https://dolphinscheduler.apache.org) distributed deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+## Prerequisites
+
+- Kubernetes 1.10+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```bash
+$ git clone https://github.com/apache/incubator-dolphinscheduler.git
+$ cd incubator-dolphinscheduler
+$ helm install --name dolphinscheduler .
+```
+These commands deploy Dolphin Scheduler on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `dolphinscheduler` deployment:
+
+```bash
+$ helm delete --purge dolphinscheduler
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Configuration
+
+The following table lists the configurable parameters of the Dolphin Scheduler chart and their default values.
+
+| Parameter | Description | Default |
+| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- |
+| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` |
+| `image.registry` | Docker image registry for the Dolphins Scheduler | `docker.io` |
+| `image.repository` | Docker image repository for the Dolphins Scheduler | `dolphinscheduler` |
+| `image.tag` | Docker image version for the Dolphins Scheduler | `1.2.1` |
+| `image.imagePullPolicy` | Image pull policy. One of Always, Never, IfNotPresent | `IfNotPresent` |
+| `imagePullSecrets` | ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images | `[]` |
+| | | |
+| `postgresql.enabled`                                                              | If no external PostgreSQL exists, by default, the Dolphin Scheduler will use an internal PostgreSQL                              | `true`                                                |
+| `postgresql.postgresqlUsername` | The username for internal PostgreSQL | `root` |
+| `postgresql.postgresqlPassword` | The password for internal PostgreSQL | `root` |
+| `postgresql.postgresqlDatabase` | The database for internal PostgreSQL | `dolphinscheduler` |
+| `postgresql.persistence.enabled` | Set `postgresql.persistence.enabled` to `true` to mount a new volume for internal PostgreSQL | `false` |
+| `postgresql.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
+| `postgresql.persistence.storageClass` | PostgreSQL data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `externalDatabase.host`                                                           | If an external PostgreSQL exists, and `postgresql.enabled` is set to false, Dolphin Scheduler's database host will use it.       | `localhost`                                           |
+| `externalDatabase.port`                                                           | If an external PostgreSQL exists, and `postgresql.enabled` is set to false, Dolphin Scheduler's database port will use it.       | `5432`                                                |
+| `externalDatabase.username`                                                       | If an external PostgreSQL exists, and `postgresql.enabled` is set to false, Dolphin Scheduler's database username will use it.   | `root`                                                |
+| `externalDatabase.password`                                                       | If an external PostgreSQL exists, and `postgresql.enabled` is set to false, Dolphin Scheduler's database password will use it.   | `root`                                                |
+| `externalDatabase.database`                                                       | If an external PostgreSQL exists, and `postgresql.enabled` is set to false, Dolphin Scheduler's database name will use it.       | `dolphinscheduler`                                    |
+| | | |
+| `zookeeper.enabled`                                                               | If no external Zookeeper exists, by default, the Dolphin Scheduler will use an internal Zookeeper                                | `true`                                                |
+| `zookeeper.taskQueue` | Specify task queue for `master` and `worker` | `zookeeper` |
+| `zookeeper.persistence.enabled` | Set `zookeeper.persistence.enabled` to `true` to mount a new volume for internal Zookeeper | `false` |
+| `zookeeper.persistence.size` | `PersistentVolumeClaim` Size | `20Gi` |
+| `zookeeper.persistence.storageClass` | Zookeeper data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `externalZookeeper.taskQueue`                                                     | If an external Zookeeper exists, and `zookeeper.enabled` is set to false, specify task queue for `master` and `worker`           | `zookeeper`                                           |
+| `externalZookeeper.zookeeperQuorum`                                               | If an external Zookeeper exists, and `zookeeper.enabled` is set to false, specify Zookeeper quorum                               | `127.0.0.1:2181`                                      |
+| | | |
+| `master.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
+| `master.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
+| `master.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `master.tolerations` | If specified, the pod's tolerations | `{}` |
+| `master.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `master.configmap.MASTER_EXEC_THREADS` | Master execute thread num | `100` |
+| `master.configmap.MASTER_EXEC_TASK_NUM` | Master execute task number in parallel | `20` |
+| `master.configmap.MASTER_HEARTBEAT_INTERVAL` | Master heartbeat interval | `10` |
+| `master.configmap.MASTER_TASK_COMMIT_RETRYTIMES` | Master commit task retry times | `5` |
+| `master.configmap.MASTER_TASK_COMMIT_INTERVAL` | Master commit task interval | `1000` |
+| `master.configmap.MASTER_MAX_CPULOAD_AVG` | Only less than cpu avg load, master server can work. default value : the number of cpu cores * 2 | `100` |
+| `master.configmap.MASTER_RESERVED_MEMORY` | Only larger than reserved memory, master server can work. default value : physical memory * 1/10, unit is G | `0.1` |
+| `master.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `master.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `master.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `master.livenessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `master.livenessProbe.successThreshold`                                           | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `master.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `master.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `master.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `master.readinessProbe.failureThreshold`                                          | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `master.readinessProbe.successThreshold`                                          | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `master.persistentVolumeClaim.enabled` | Set `master.persistentVolumeClaim.enabled` to `true` to mount a new volume for `master` | `false` |
+| `master.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `master.persistentVolumeClaim.storageClassName` | `Master` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `master.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `worker.podManagementPolicy` | PodManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down | `Parallel` |
+| `worker.replicas` | Replicas is the desired number of replicas of the given Template | `3` |
+| `worker.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `worker.tolerations` | If specified, the pod's tolerations | `{}` |
+| `worker.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `worker.configmap.WORKER_EXEC_THREADS` | Worker execute thread num | `100` |
+| `worker.configmap.WORKER_HEARTBEAT_INTERVAL` | Worker heartbeat interval | `10` |
+| `worker.configmap.WORKER_FETCH_TASK_NUM` | Submit the number of tasks at a time | `3` |
+| `worker.configmap.WORKER_MAX_CPULOAD_AVG` | Only less than cpu avg load, worker server can work. default value : the number of cpu cores * 2 | `100` |
+| `worker.configmap.WORKER_RESERVED_MEMORY` | Only larger than reserved memory, worker server can work. default value : physical memory * 1/10, unit is G | `0.1` |
+| `worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH` | User data directory path, self configuration, please make sure the directory exists and have read write permissions | `/tmp/dolphinscheduler` |
+| `worker.configmap.DOLPHINSCHEDULER_ENV` | System env path, self configuration, please read `values.yaml` | `[]` |
+| `worker.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `worker.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `worker.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `worker.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `worker.livenessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `worker.livenessProbe.successThreshold`                                           | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `worker.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `worker.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `worker.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `worker.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `worker.readinessProbe.failureThreshold`                                          | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `worker.readinessProbe.successThreshold`                                          | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `worker.persistentVolumeClaim.enabled` | Set `worker.persistentVolumeClaim.enabled` to `true` to enable `persistentVolumeClaim` for `worker` | `false` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.dataPersistentVolume.enabled` to `true` to mount a data volume for `worker` | `false` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.storageClassName` | `Worker` data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `worker.persistentVolumeClaim.dataPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.enabled` | Set `worker.persistentVolumeClaim.logsPersistentVolume.enabled` to `true` to mount a logs volume for `worker` | `false` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.storageClassName` | `Worker` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `worker.persistentVolumeClaim.logsPersistentVolume.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `alert.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
+| `alert.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
+| `alert.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
+| `alert.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
+| `alert.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `alert.tolerations` | If specified, the pod's tolerations | `{}` |
+| `alert.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `alert.configmap.XLS_FILE_PATH` | XLS file path | `/tmp/xls` |
+| `alert.configmap.MAIL_SERVER_HOST` | Mail `SERVER HOST ` | `nil` |
+| `alert.configmap.MAIL_SERVER_PORT` | Mail `SERVER PORT` | `nil` |
+| `alert.configmap.MAIL_SENDER` | Mail `SENDER` | `nil` |
+| `alert.configmap.MAIL_USER` | Mail `USER` | `nil` |
+| `alert.configmap.MAIL_PASSWD` | Mail `PASSWORD` | `nil` |
+| `alert.configmap.MAIL_SMTP_STARTTLS_ENABLE` | Mail `SMTP STARTTLS` enable | `false` |
+| `alert.configmap.MAIL_SMTP_SSL_ENABLE` | Mail `SMTP SSL` enable | `false` |
+| `alert.configmap.MAIL_SMTP_SSL_TRUST` | Mail `SMTP SSL TRUST` | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_ENABLE` | `Enterprise Wechat` enable | `false` |
+| `alert.configmap.ENTERPRISE_WECHAT_CORP_ID` | `Enterprise Wechat` corp id | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_SECRET` | `Enterprise Wechat` secret | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_AGENT_ID` | `Enterprise Wechat` agent id | `nil` |
+| `alert.configmap.ENTERPRISE_WECHAT_USERS` | `Enterprise Wechat` users | `nil` |
+| `alert.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `alert.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `alert.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `alert.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `alert.livenessProbe.failureThreshold`                                            | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `alert.livenessProbe.successThreshold`                                            | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `alert.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `alert.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `alert.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `alert.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `alert.readinessProbe.failureThreshold`                                           | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `alert.readinessProbe.successThreshold`                                           | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `alert.persistentVolumeClaim.enabled` | Set `alert.persistentVolumeClaim.enabled` to `true` to mount a new volume for `alert` | `false` |
+| `alert.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `alert.persistentVolumeClaim.storageClassName` | `Alert` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `alert.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `api.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
+| `api.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
+| `api.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
+| `api.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
+| `api.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `api.tolerations` | If specified, the pod's tolerations | `{}` |
+| `api.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `api.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `api.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `api.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `api.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `api.livenessProbe.failureThreshold`                                              | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `api.livenessProbe.successThreshold`                                              | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `api.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `api.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `api.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `api.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `api.readinessProbe.failureThreshold`                                             | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `api.readinessProbe.successThreshold`                                             | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `api.persistentVolumeClaim.enabled` | Set `api.persistentVolumeClaim.enabled` to `true` to mount a new volume for `api` | `false` |
+| `api.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `api.persistentVolumeClaim.storageClassName` | `api` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `api.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `frontend.strategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate" | `RollingUpdate` |
+| `frontend.strategy.rollingUpdate.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `25%` |
+| `frontend.strategy.rollingUpdate.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `25%` |
+| `frontend.replicas` | Replicas is the desired number of replicas of the given Template | `1` |
+| `frontend.nodeSelector` | NodeSelector is a selector which must be true for the pod to fit on a node | `{}` |
+| `frontend.tolerations` | If specified, the pod's tolerations | `{}` |
+| `frontend.affinity` | If specified, the pod's scheduling constraints | `{}` |
+| `frontend.livenessProbe.enabled` | Turn on and off liveness probe | `true` |
+| `frontend.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` |
+| `frontend.livenessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `frontend.livenessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `frontend.livenessProbe.failureThreshold`                                         | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `frontend.livenessProbe.successThreshold`                                         | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `frontend.readinessProbe.enabled` | Turn on and off readiness probe | `true` |
+| `frontend.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` |
+| `frontend.readinessProbe.periodSeconds` | How often to perform the probe | `30` |
+| `frontend.readinessProbe.timeoutSeconds` | When the probe times out | `5` |
+| `frontend.readinessProbe.failureThreshold`                                        | Minimum consecutive failures for the probe                                                                                       | `3`                                                   |
+| `frontend.readinessProbe.successThreshold`                                        | Minimum consecutive successes for the probe                                                                                      | `1`                                                   |
+| `frontend.persistentVolumeClaim.enabled` | Set `frontend.persistentVolumeClaim.enabled` to `true` to mount a new volume for `frontend` | `false` |
+| `frontend.persistentVolumeClaim.accessModes` | `PersistentVolumeClaim` Access Modes | `[ReadWriteOnce]` |
+| `frontend.persistentVolumeClaim.storageClassName` | `frontend` logs data Persistent Volume Storage Class. If set to "-", storageClassName: "", which disables dynamic provisioning | `-` |
+| `frontend.persistentVolumeClaim.storage` | `PersistentVolumeClaim` Size | `20Gi` |
+| | | |
+| `ingress.enabled` | Enable ingress | `false` |
+| `ingress.host` | Ingress host | `dolphinscheduler.org` |
+| `ingress.path` | Ingress path | `/` |
+| `ingress.tls.enabled` | Enable ingress tls | `false` |
+| `ingress.tls.hosts` | Ingress tls hosts | `dolphinscheduler.org` |
+| `ingress.tls.secretName` | Ingress tls secret name | `dolphinscheduler-tls` |
+
+For more information please refer to the [chart](https://github.com/apache/incubator-dolphinscheduler.git) documentation.
diff --git a/charts/dolphinscheduler/templates/NOTES.txt b/charts/dolphinscheduler/templates/NOTES.txt
new file mode 100644
index 0000000000..eb3a9cfc52
--- /dev/null
+++ b/charts/dolphinscheduler/templates/NOTES.txt
@@ -0,0 +1,44 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+** Please be patient while the chart is being deployed **
+
+1. Get the Dolphinscheduler URL by running:
+
+{{- if .Values.ingress.enabled }}
+
+ export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
+ echo "Dolphinscheduler URL: http://$HOSTNAME/"
+
+{{- else }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
+
+{{- end }}
+
+2. Get the Dolphinscheduler URL by running:
+
+{{- if .Values.ingress.enabled }}
+
+ export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "dolphinscheduler.fullname" . }} -o jsonpath='{.spec.rules[0].host}')
+ echo "Dolphinscheduler URL: http://$HOSTNAME/"
+
+{{- else }}
+
+ kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "dolphinscheduler.fullname" . }}-frontend 8888:8888
+
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/_helpers.tpl b/charts/dolphinscheduler/templates/_helpers.tpl
new file mode 100644
index 0000000000..37fb034128
--- /dev/null
+++ b/charts/dolphinscheduler/templates/_helpers.tpl
@@ -0,0 +1,149 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "dolphinscheduler.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "dolphinscheduler.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "dolphinscheduler.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Common labels
+*/}}
+{{- define "dolphinscheduler.labels" -}}
+helm.sh/chart: {{ include "dolphinscheduler.chart" . }}
+{{ include "dolphinscheduler.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end -}}
+
+{{/*
+Selector labels
+*/}}
+{{- define "dolphinscheduler.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "dolphinscheduler.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+ {{ default (include "dolphinscheduler.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+ {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create a default docker image registry.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.image.registry" -}}
+{{- $registry := default "docker.io" .Values.image.registry -}}
+{{- printf "%s" $registry | trunc 63 | trimSuffix "/" -}}
+{{- end -}}
+
+{{/*
+Create a default docker image repository.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.image.repository" -}}
+{{- printf "%s/%s:%s" (include "dolphinscheduler.image.registry" .) .Values.image.repository .Values.image.tag -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified postgresql name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.postgresql.fullname" -}}
+{{- $name := default "postgresql" .Values.postgresql.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified zookeeper name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.zookeeper.fullname" -}}
+{{- $name := default "zookeeper" .Values.zookeeper.nameOverride -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified zookeeper quorum.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.zookeeper.quorum" -}}
+{{- $port := default "2181" (.Values.zookeeper.service.port | toString) -}}
+{{- printf "%s:%s" (include "dolphinscheduler.zookeeper.fullname" .) $port | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default dolphinscheduler worker base dir.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.worker.base.dir" -}}
+{{- $name := default "/tmp/dolphinscheduler" .Values.worker.configmap.DOLPHINSCHEDULER_DATA_BASEDIR_PATH -}}
+{{- printf "%s" $name | trunc 63 | trimSuffix "/" -}}
+{{- end -}}
+
+{{/*
+Create a default dolphinscheduler worker data download dir.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.worker.data.download.dir" -}}
+{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/download" -}}
+{{- end -}}
+
+{{/*
+Create a default dolphinscheduler worker process exec dir.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}}
+{{- define "dolphinscheduler.worker.process.exec.dir" -}}
+{{- printf "%s%s" (include "dolphinscheduler.worker.base.dir" .) "/exec" -}}
+{{- end -}}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml b/charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
new file mode 100644
index 0000000000..76daad8568
--- /dev/null
+++ b/charts/dolphinscheduler/templates/configmap-dolphinscheduler-alert.yaml
@@ -0,0 +1,41 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.alert.configmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+ XLS_FILE_PATH: {{ .Values.alert.configmap.XLS_FILE_PATH | quote }}
+ MAIL_SERVER_HOST: {{ .Values.alert.configmap.MAIL_SERVER_HOST | quote }}
+ MAIL_SERVER_PORT: {{ .Values.alert.configmap.MAIL_SERVER_PORT | quote }}
+ MAIL_SENDER: {{ .Values.alert.configmap.MAIL_SENDER | quote }}
+ MAIL_USER: {{ .Values.alert.configmap.MAIL_USER | quote }}
+ MAIL_PASSWD: {{ .Values.alert.configmap.MAIL_PASSWD | quote }}
+ MAIL_SMTP_STARTTLS_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_STARTTLS_ENABLE | quote }}
+ MAIL_SMTP_SSL_ENABLE: {{ .Values.alert.configmap.MAIL_SMTP_SSL_ENABLE | quote }}
+ MAIL_SMTP_SSL_TRUST: {{ .Values.alert.configmap.MAIL_SMTP_SSL_TRUST | quote }}
+ ENTERPRISE_WECHAT_ENABLE: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_ENABLE | quote }}
+ ENTERPRISE_WECHAT_CORP_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_CORP_ID | quote }}
+ ENTERPRISE_WECHAT_SECRET: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_SECRET | quote }}
+ ENTERPRISE_WECHAT_AGENT_ID: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_AGENT_ID | quote }}
+ ENTERPRISE_WECHAT_USERS: {{ .Values.alert.configmap.ENTERPRISE_WECHAT_USERS | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml b/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
new file mode 100644
index 0000000000..8cce068276
--- /dev/null
+++ b/charts/dolphinscheduler/templates/configmap-dolphinscheduler-master.yaml
@@ -0,0 +1,34 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.master.configmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+ MASTER_EXEC_THREADS: {{ .Values.master.configmap.MASTER_EXEC_THREADS | quote }}
+ MASTER_EXEC_TASK_NUM: {{ .Values.master.configmap.MASTER_EXEC_TASK_NUM | quote }}
+ MASTER_HEARTBEAT_INTERVAL: {{ .Values.master.configmap.MASTER_HEARTBEAT_INTERVAL | quote }}
+ MASTER_TASK_COMMIT_RETRYTIMES: {{ .Values.master.configmap.MASTER_TASK_COMMIT_RETRYTIMES | quote }}
+ MASTER_TASK_COMMIT_INTERVAL: {{ .Values.master.configmap.MASTER_TASK_COMMIT_INTERVAL | quote }}
+ MASTER_MAX_CPULOAD_AVG: {{ .Values.master.configmap.MASTER_MAX_CPULOAD_AVG | quote }}
+ MASTER_RESERVED_MEMORY: {{ .Values.master.configmap.MASTER_RESERVED_MEMORY | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml b/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
new file mode 100644
index 0000000000..be7391fb32
--- /dev/null
+++ b/charts/dolphinscheduler/templates/configmap-dolphinscheduler-worker.yaml
@@ -0,0 +1,39 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.worker.configmap }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+data:
+ WORKER_EXEC_THREADS: {{ .Values.worker.configmap.WORKER_EXEC_THREADS | quote }}
+ WORKER_HEARTBEAT_INTERVAL: {{ .Values.worker.configmap.WORKER_HEARTBEAT_INTERVAL | quote }}
+ WORKER_FETCH_TASK_NUM: {{ .Values.worker.configmap.WORKER_FETCH_TASK_NUM | quote }}
+ WORKER_MAX_CPULOAD_AVG: {{ .Values.worker.configmap.WORKER_MAX_CPULOAD_AVG | quote }}
+ WORKER_RESERVED_MEMORY: {{ .Values.worker.configmap.WORKER_RESERVED_MEMORY | quote }}
+ DOLPHINSCHEDULER_DATA_BASEDIR_PATH: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
+ DOLPHINSCHEDULER_DATA_DOWNLOAD_BASEDIR_PATH: {{ include "dolphinscheduler.worker.data.download.dir" . | quote }}
+ DOLPHINSCHEDULER_PROCESS_EXEC_BASEPATH: {{ include "dolphinscheduler.worker.process.exec.dir" . | quote }}
+ dolphinscheduler_env.sh: |-
+ {{- range .Values.worker.configmap.DOLPHINSCHEDULER_ENV }}
+ {{ . }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml b/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
new file mode 100644
index 0000000000..26026f74b3
--- /dev/null
+++ b/charts/dolphinscheduler/templates/deployment-dolphinscheduler-alert.yaml
@@ -0,0 +1,228 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: alert
+spec:
+ replicas: {{ .Values.alert.replicas }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: alert
+ strategy:
+ type: {{ .Values.alert.strategy.type | quote }}
+ rollingUpdate:
+ maxSurge: {{ .Values.alert.strategy.rollingUpdate.maxSurge | quote }}
+ maxUnavailable: {{ .Values.alert.strategy.rollingUpdate.maxUnavailable | quote }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: alert
+ spec:
+ {{- if .Values.alert.affinity }}
+ affinity: {{- toYaml .Values.alert.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alert.nodeSelector }}
+ nodeSelector: {{- toYaml .Values.alert.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.alert.tolerations }}
+      tolerations: {{- toYaml .Values.alert.tolerations | nindent 8 }}
+ {{- end }}
+ initContainers:
+ - name: init-postgresql
+ image: busybox:1.31.0
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
+ counter=$((counter+1))
+                if [ $counter -eq 5 ]; then
+ echo "Error: Couldn't connect to postgresql."
+ exit 1
+ fi
+ echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
+ sleep 60
+ done
+ env:
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+          value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ containers:
+ - name: {{ include "dolphinscheduler.fullname" . }}-alert
+ image: {{ include "dolphinscheduler.image.repository" . | quote }}
+ args:
+ - "alert-server"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: TZ
+ value: {{ .Values.timezone }}
+ - name: XLS_FILE_PATH
+ valueFrom:
+ configMapKeyRef:
+ key: XLS_FILE_PATH
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_SERVER_HOST
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_SERVER_HOST
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_SERVER_PORT
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_SERVER_PORT
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_SENDER
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_SENDER
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_USER
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_USER
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_PASSWD
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_PASSWD
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_SMTP_STARTTLS_ENABLE
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_SMTP_STARTTLS_ENABLE
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_SMTP_SSL_ENABLE
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_SMTP_SSL_ENABLE
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: MAIL_SMTP_SSL_TRUST
+ valueFrom:
+ configMapKeyRef:
+ key: MAIL_SMTP_SSL_TRUST
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: ENTERPRISE_WECHAT_ENABLE
+ valueFrom:
+ configMapKeyRef:
+ key: ENTERPRISE_WECHAT_ENABLE
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: ENTERPRISE_WECHAT_CORP_ID
+ valueFrom:
+ configMapKeyRef:
+ key: ENTERPRISE_WECHAT_CORP_ID
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: ENTERPRISE_WECHAT_SECRET
+ valueFrom:
+ configMapKeyRef:
+ key: ENTERPRISE_WECHAT_SECRET
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: ENTERPRISE_WECHAT_AGENT_ID
+ valueFrom:
+ configMapKeyRef:
+ key: ENTERPRISE_WECHAT_AGENT_ID
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: ENTERPRISE_WECHAT_USERS
+ valueFrom:
+ configMapKeyRef:
+ key: ENTERPRISE_WECHAT_USERS
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+            value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ - name: POSTGRESQL_USERNAME
+ {{- if .Values.postgresql.enabled }}
+ value: {{ .Values.postgresql.postgresqlUsername }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.username | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.postgresql.enabled }}
+ name: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- else }}
+ name: {{ printf "%s-%s" .Release.Name "externaldb" }}
+ key: db-password
+ {{- end }}
+ {{- if .Values.alert.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - /root/checkpoint.sh
+            - alert-server
+ initialDelaySeconds: {{ .Values.alert.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.alert.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.alert.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.alert.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.alert.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.alert.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - /root/checkpoint.sh
+            - alert-server
+ initialDelaySeconds: {{ .Values.alert.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.alert.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.alert.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.alert.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.alert.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: "/opt/dolphinscheduler/logs"
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ volumes:
+ - name: {{ include "dolphinscheduler.fullname" . }}-alert
+ {{- if .Values.alert.persistentVolumeClaim.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "dolphinscheduler.fullname" . }}-alert
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml b/charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
new file mode 100644
index 0000000000..926ce3c062
--- /dev/null
+++ b/charts/dolphinscheduler/templates/deployment-dolphinscheduler-api.yaml
@@ -0,0 +1,161 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-api
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: api
+spec:
+ replicas: {{ .Values.api.replicas }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: api
+ strategy:
+ type: {{ .Values.api.strategy.type | quote }}
+ rollingUpdate:
+ maxSurge: {{ .Values.api.strategy.rollingUpdate.maxSurge | quote }}
+ maxUnavailable: {{ .Values.api.strategy.rollingUpdate.maxUnavailable | quote }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: api
+ spec:
+ {{- if .Values.api.affinity }}
+ affinity: {{- toYaml .Values.api.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.api.nodeSelector }}
+ nodeSelector: {{- toYaml .Values.api.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.api.tolerations }}
+      tolerations: {{- toYaml .Values.api.tolerations | nindent 8 }}
+ {{- end }}
+ initContainers:
+ - name: init-postgresql
+ image: busybox:1.31.0
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
+ counter=$((counter+1))
+                if [ $counter -eq 5 ]; then
+ echo "Error: Couldn't connect to postgresql."
+ exit 1
+ fi
+ echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
+ sleep 60
+ done
+ env:
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+          value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ containers:
+ - name: {{ include "dolphinscheduler.fullname" . }}-api
+ image: {{ include "dolphinscheduler.image.repository" . | quote }}
+ args:
+ - "api-server"
+ ports:
+ - containerPort: 12345
+ name: tcp-port
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: TZ
+ value: {{ .Values.timezone }}
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+            value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ - name: POSTGRESQL_USERNAME
+ {{- if .Values.postgresql.enabled }}
+ value: {{ .Values.postgresql.postgresqlUsername }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.username | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.postgresql.enabled }}
+ name: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- else }}
+ name: {{ printf "%s-%s" .Release.Name "externaldb" }}
+ key: db-password
+ {{- end }}
+ - name: ZOOKEEPER_QUORUM
+ {{- if .Values.zookeeper.enabled }}
+ value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
+ {{- else }}
+            value: {{ .Values.externalZookeeper.zookeeperQuorum | quote }}
+ {{- end }}
+ {{- if .Values.api.livenessProbe.enabled }}
+ livenessProbe:
+ tcpSocket:
+ port: 12345
+ initialDelaySeconds: {{ .Values.api.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.api.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.api.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.api.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.api.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.api.readinessProbe.enabled }}
+ readinessProbe:
+ tcpSocket:
+ port: 12345
+ initialDelaySeconds: {{ .Values.api.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.api.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.api.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.api.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.api.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: "/opt/dolphinscheduler/logs"
+ name: {{ include "dolphinscheduler.fullname" . }}-api
+ volumes:
+ - name: {{ include "dolphinscheduler.fullname" . }}-api
+ {{- if .Values.api.persistentVolumeClaim.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "dolphinscheduler.fullname" . }}-api
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml b/charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
new file mode 100644
index 0000000000..aea09f107f
--- /dev/null
+++ b/charts/dolphinscheduler/templates/deployment-dolphinscheduler-frontend.yaml
@@ -0,0 +1,102 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: frontend
+spec:
+ replicas: {{ .Values.frontend.replicas }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: frontend
+ strategy:
+ type: {{ .Values.frontend.strategy.type | quote }}
+ rollingUpdate:
+ maxSurge: {{ .Values.frontend.strategy.rollingUpdate.maxSurge | quote }}
+ maxUnavailable: {{ .Values.frontend.strategy.rollingUpdate.maxUnavailable | quote }}
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: frontend
+ spec:
+ {{- if .Values.frontend.affinity }}
+ affinity: {{- toYaml .Values.frontend.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.frontend.nodeSelector }}
+ nodeSelector: {{- toYaml .Values.frontend.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.frontend.tolerations }}
+      tolerations: {{- toYaml .Values.frontend.tolerations | nindent 8 }}
+ {{- end }}
+ containers:
+ - name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ image: {{ include "dolphinscheduler.image.repository" . | quote }}
+ args:
+ - "frontend"
+ ports:
+ - containerPort: 8888
+ name: tcp-port
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: TZ
+ value: {{ .Values.timezone }}
+ - name: FRONTEND_API_SERVER_HOST
+ value: '{{ include "dolphinscheduler.fullname" . }}-api'
+ - name: FRONTEND_API_SERVER_PORT
+ value: "12345"
+ {{- if .Values.frontend.livenessProbe.enabled }}
+ livenessProbe:
+ tcpSocket:
+ port: 8888
+ initialDelaySeconds: {{ .Values.frontend.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.frontend.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.frontend.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.frontend.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.frontend.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.frontend.readinessProbe.enabled }}
+ readinessProbe:
+ tcpSocket:
+ port: 8888
+ initialDelaySeconds: {{ .Values.frontend.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.frontend.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.frontend.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.frontend.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.frontend.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: "/var/log/nginx"
+ name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ volumes:
+ - name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ {{- if .Values.frontend.persistentVolumeClaim.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "dolphinscheduler.fullname" . }}-frontend
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/ingress.yaml b/charts/dolphinscheduler/templates/ingress.yaml
new file mode 100644
index 0000000000..d0f923dcf1
--- /dev/null
+++ b/charts/dolphinscheduler/templates/ingress.yaml
@@ -0,0 +1,43 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.ingress.enabled }}
+apiVersion: networking.k8s.io/v1beta1
+kind: Ingress
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.name" . }}
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ rules:
+ - host: {{ .Values.ingress.host }}
+ http:
+ paths:
+ - path: {{ .Values.ingress.path }}
+ backend:
+ serviceName: {{ include "dolphinscheduler.fullname" . }}-frontend
+ servicePort: tcp-port
+ {{- if .Values.ingress.tls.enabled }}
+ tls:
+ hosts:
+ {{- range .Values.ingress.tls.hosts }}
+ - {{ . | quote }}
+ {{- end }}
+ secretName: {{ .Values.ingress.tls.secretName }}
+ {{- end }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml b/charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
new file mode 100644
index 0000000000..7f74cd94ae
--- /dev/null
+++ b/charts/dolphinscheduler/templates/pvc-dolphinscheduler-alert.yaml
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.alert.persistentVolumeClaim.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-alert
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-alert
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ accessModes:
+ {{- range .Values.alert.persistentVolumeClaim.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ storageClassName: {{ .Values.alert.persistentVolumeClaim.storageClassName | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.alert.persistentVolumeClaim.storage | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml b/charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
new file mode 100644
index 0000000000..c1074cc2b1
--- /dev/null
+++ b/charts/dolphinscheduler/templates/pvc-dolphinscheduler-api.yaml
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.api.persistentVolumeClaim.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-api
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ accessModes:
+ {{- range .Values.api.persistentVolumeClaim.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ storageClassName: {{ .Values.api.persistentVolumeClaim.storageClassName | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.api.persistentVolumeClaim.storage | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml b/charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml
new file mode 100644
index 0000000000..ac9fe02a9e
--- /dev/null
+++ b/charts/dolphinscheduler/templates/pvc-dolphinscheduler-frontend.yaml
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if .Values.frontend.persistentVolumeClaim.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ accessModes:
+ {{- range .Values.frontend.persistentVolumeClaim.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ storageClassName: {{ .Values.frontend.persistentVolumeClaim.storageClassName | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.frontend.persistentVolumeClaim.storage | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/secret-external-postgresql.yaml b/charts/dolphinscheduler/templates/secret-external-postgresql.yaml
new file mode 100644
index 0000000000..16d026afc6
--- /dev/null
+++ b/charts/dolphinscheduler/templates/secret-external-postgresql.yaml
@@ -0,0 +1,29 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+{{- if not .Values.postgresql.enabled }}
+apiVersion: v1
+kind: Secret
+metadata:
+ name: {{ printf "%s-%s" .Release.Name "externaldb" }}
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-postgresql
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+type: Opaque
+data:
+ db-password: {{ .Values.externalDatabase.password | b64enc | quote }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml b/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
new file mode 100644
index 0000000000..ac974128b7
--- /dev/null
+++ b/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-master.yaml
@@ -0,0 +1,247 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: master
+spec:
+ podManagementPolicy: {{ .Values.master.podManagementPolicy }}
+ replicas: {{ .Values.master.replicas }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: master
+ serviceName: {{ template "dolphinscheduler.fullname" . }}-master-headless
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: master
+ spec:
+ {{- if .Values.master.affinity }}
+ affinity: {{- toYaml .Values.master.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.master.nodeSelector }}
+ nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.master.tolerations }}
+      tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }}
+ {{- end }}
+ initContainers:
+ - name: init-zookeeper
+ image: busybox:1.31.0
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
+ while ! nc -z ${line%:*} ${line#*:}; do
+ counter=$((counter+1))
+ if [ $counter == 5 ]; then
+ echo "Error: Couldn't connect to zookeeper."
+ exit 1
+ fi
+ echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
+ sleep 60
+ done
+ done
+ env:
+ - name: ZOOKEEPER_QUORUM
+ {{- if .Values.zookeeper.enabled }}
+ value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
+ {{- else }}
+ value: {{ .Values.externalZookeeper.zookeeperQuorum }}
+ {{- end }}
+ - name: init-postgresql
+ image: busybox:1.31.0
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
+ counter=$((counter+1))
+ if [ $counter == 5 ]; then
+ echo "Error: Couldn't connect to postgresql."
+ exit 1
+ fi
+ echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
+ sleep 60
+ done
+ env:
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+          value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ containers:
+ - name: {{ include "dolphinscheduler.fullname" . }}-master
+ image: {{ include "dolphinscheduler.image.repository" . | quote }}
+ args:
+ - "master-server"
+ ports:
+ - containerPort: 8888
+ name: unused-tcp-port
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: TZ
+ value: {{ .Values.timezone }}
+ - name: MASTER_EXEC_THREADS
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_EXEC_THREADS
+ - name: MASTER_EXEC_TASK_NUM
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_EXEC_TASK_NUM
+ - name: MASTER_HEARTBEAT_INTERVAL
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_HEARTBEAT_INTERVAL
+ - name: MASTER_TASK_COMMIT_RETRYTIMES
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_TASK_COMMIT_RETRYTIMES
+ - name: MASTER_TASK_COMMIT_INTERVAL
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_TASK_COMMIT_INTERVAL
+ - name: MASTER_MAX_CPULOAD_AVG
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_MAX_CPULOAD_AVG
+ - name: MASTER_RESERVED_MEMORY
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ key: MASTER_RESERVED_MEMORY
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+          value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ - name: POSTGRESQL_USERNAME
+ {{- if .Values.postgresql.enabled }}
+ value: {{ .Values.postgresql.postgresqlUsername }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.username | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.postgresql.enabled }}
+ name: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- else }}
+ name: {{ printf "%s-%s" .Release.Name "externaldb" }}
+ key: db-password
+ {{- end }}
+ - name: TASK_QUEUE
+ {{- if .Values.zookeeper.enabled }}
+ value: {{ .Values.zookeeper.taskQueue }}
+ {{- else }}
+ value: {{ .Values.externalZookeeper.taskQueue }}
+ {{- end }}
+ - name: ZOOKEEPER_QUORUM
+ {{- if .Values.zookeeper.enabled }}
+          value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
+ {{- else }}
+ value: {{ .Values.externalZookeeper.zookeeperQuorum }}
+ {{- end }}
+ {{- if .Values.master.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - /root/checkpoint.sh
+ - master-server
+ initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.master.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.master.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.master.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - /root/checkpoint.sh
+ - master-server
+ initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.master.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.master.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: "/opt/dolphinscheduler/logs"
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ volumes:
+ - name: {{ include "dolphinscheduler.fullname" . }}-master
+ {{- if .Values.master.persistentVolumeClaim.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "dolphinscheduler.fullname" . }}-master
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ {{- if .Values.master.persistentVolumeClaim.enabled }}
+ volumeClaimTemplates:
+ - metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-master
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.master.persistentVolumeClaim.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ storageClassName: {{ .Values.master.persistentVolumeClaim.storageClassName | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.master.persistentVolumeClaim.storage | quote }}
+ {{- end }}
diff --git a/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml b/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
new file mode 100644
index 0000000000..a2407978b4
--- /dev/null
+++ b/charts/dolphinscheduler/templates/statefulset-dolphinscheduler-worker.yaml
@@ -0,0 +1,275 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: worker
+spec:
+ podManagementPolicy: {{ .Values.worker.podManagementPolicy }}
+ replicas: {{ .Values.worker.replicas }}
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: worker
+ serviceName: {{ template "dolphinscheduler.fullname" . }}-worker-headless
+ template:
+ metadata:
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: worker
+ spec:
+ {{- if .Values.worker.affinity }}
+ affinity: {{- toYaml .Values.worker.affinity | nindent 8 }}
+ {{- end }}
+ {{- if .Values.worker.nodeSelector }}
+ nodeSelector: {{- toYaml .Values.worker.nodeSelector | nindent 8 }}
+ {{- end }}
+ {{- if .Values.worker.tolerations }}
+      tolerations: {{- toYaml .Values.worker.tolerations | nindent 8 }}
+ {{- end }}
+ initContainers:
+ - name: init-zookeeper
+ image: busybox:1.31.0
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ echo "${ZOOKEEPER_QUORUM}" | awk -F ',' 'BEGIN{ i=1 }{ while( i <= NF ){ print $i; i++ } }' | while read line; do
+ while ! nc -z ${line%:*} ${line#*:}; do
+ counter=$((counter+1))
+ if [ $counter == 5 ]; then
+ echo "Error: Couldn't connect to zookeeper."
+ exit 1
+ fi
+ echo "Trying to connect to zookeeper at ${line}. Attempt $counter."
+ sleep 60
+ done
+ done
+ env:
+ - name: ZOOKEEPER_QUORUM
+ {{- if .Values.zookeeper.enabled }}
+ value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
+ {{- else }}
+ value: {{ .Values.externalZookeeper.zookeeperQuorum }}
+ {{- end }}
+ - name: init-postgresql
+ image: busybox:1.31.0
+ command:
+ - /bin/sh
+ - -ec
+ - |
+ while ! nc -z ${POSTGRESQL_HOST} ${POSTGRESQL_PORT}; do
+ counter=$((counter+1))
+ if [ $counter == 5 ]; then
+ echo "Error: Couldn't connect to postgresql."
+ exit 1
+ fi
+ echo "Trying to connect to postgresql at ${POSTGRESQL_HOST}:${POSTGRESQL_PORT}. Attempt $counter."
+ sleep 60
+ done
+ env:
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+          value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ containers:
+ - name: {{ include "dolphinscheduler.fullname" . }}-worker
+ image: {{ include "dolphinscheduler.image.repository" . | quote }}
+ args:
+ - "worker-server"
+ ports:
+ - containerPort: 50051
+ name: "logs-port"
+ imagePullPolicy: {{ .Values.image.pullPolicy }}
+ env:
+ - name: TZ
+ value: {{ .Values.timezone }}
+ - name: WORKER_EXEC_THREADS
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ key: WORKER_EXEC_THREADS
+ - name: WORKER_FETCH_TASK_NUM
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ key: WORKER_FETCH_TASK_NUM
+ - name: WORKER_HEARTBEAT_INTERVAL
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ key: WORKER_HEARTBEAT_INTERVAL
+ - name: WORKER_MAX_CPULOAD_AVG
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ key: WORKER_MAX_CPULOAD_AVG
+ - name: WORKER_RESERVED_MEMORY
+ valueFrom:
+ configMapKeyRef:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ key: WORKER_RESERVED_MEMORY
+ - name: POSTGRESQL_HOST
+ {{- if .Values.postgresql.enabled }}
+ value: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.host | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PORT
+ {{- if .Values.postgresql.enabled }}
+ value: "5432"
+ {{- else }}
+          value: {{ .Values.externalDatabase.port | quote }}
+ {{- end }}
+ - name: POSTGRESQL_USERNAME
+ {{- if .Values.postgresql.enabled }}
+ value: {{ .Values.postgresql.postgresqlUsername }}
+ {{- else }}
+ value: {{ .Values.externalDatabase.username | quote }}
+ {{- end }}
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ {{- if .Values.postgresql.enabled }}
+ name: {{ template "dolphinscheduler.postgresql.fullname" . }}
+ key: postgresql-password
+ {{- else }}
+ name: {{ printf "%s-%s" .Release.Name "externaldb" }}
+ key: db-password
+ {{- end }}
+ - name: TASK_QUEUE
+ {{- if .Values.zookeeper.enabled }}
+ value: {{ .Values.zookeeper.taskQueue }}
+ {{- else }}
+ value: {{ .Values.externalZookeeper.taskQueue }}
+ {{- end }}
+ - name: ZOOKEEPER_QUORUM
+ {{- if .Values.zookeeper.enabled }}
+ value: "{{ template "dolphinscheduler.zookeeper.quorum" . }}"
+ {{- else }}
+ value: {{ .Values.externalZookeeper.zookeeperQuorum }}
+ {{- end }}
+ {{- if .Values.worker.livenessProbe.enabled }}
+ livenessProbe:
+ exec:
+ command:
+ - sh
+ - /root/checkpoint.sh
+ - worker-server
+ initialDelaySeconds: {{ .Values.worker.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.worker.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.worker.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.worker.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.worker.livenessProbe.failureThreshold }}
+ {{- end }}
+ {{- if .Values.worker.readinessProbe.enabled }}
+ readinessProbe:
+ exec:
+ command:
+ - sh
+ - /root/checkpoint.sh
+ - worker-server
+ initialDelaySeconds: {{ .Values.worker.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.worker.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.worker.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.worker.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.worker.readinessProbe.failureThreshold }}
+ {{- end }}
+ volumeMounts:
+ - mountPath: {{ include "dolphinscheduler.worker.base.dir" . | quote }}
+ name: {{ include "dolphinscheduler.fullname" . }}-worker-data
+ - mountPath: "/opt/dolphinscheduler/logs"
+ name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
+ - mountPath: "/opt/dolphinscheduler/conf/env/dolphinscheduler_env.sh"
+ subPath: "dolphinscheduler_env.sh"
+ name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
+ volumes:
+ - name: {{ include "dolphinscheduler.fullname" . }}-worker-data
+ {{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "dolphinscheduler.fullname" . }}-worker-data
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
+ {{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
+ persistentVolumeClaim:
+ claimName: {{ include "dolphinscheduler.fullname" . }}-worker-logs
+ {{- else }}
+ emptyDir: {}
+ {{- end }}
+ - name: {{ include "dolphinscheduler.fullname" . }}-worker-configmap
+ configMap:
+ defaultMode: 0777
+ name: {{ include "dolphinscheduler.fullname" . }}-worker
+ items:
+ - key: dolphinscheduler_env.sh
+ path: dolphinscheduler_env.sh
+ {{- if .Values.worker.persistentVolumeClaim.enabled }}
+ volumeClaimTemplates:
+ {{- if .Values.worker.persistentVolumeClaim.dataPersistentVolume.enabled }}
+ - metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker-data
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-data
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.worker.persistentVolumeClaim.dataPersistentVolume.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ storageClassName: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.worker.persistentVolumeClaim.dataPersistentVolume.storage | quote }}
+ {{- end }}
+ {{- if .Values.worker.persistentVolumeClaim.logsPersistentVolume.enabled }}
+ - metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-logs
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ spec:
+ accessModes:
+ {{- range .Values.worker.persistentVolumeClaim.logsPersistentVolume.accessModes }}
+ - {{ . | quote }}
+ {{- end }}
+ storageClassName: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }}
+ resources:
+ requests:
+ storage: {{ .Values.worker.persistentVolumeClaim.logsPersistentVolume.storage | quote }}
+ {{- end }}
+ {{- end }}
diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml b/charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml
new file mode 100644
index 0000000000..4d07ade242
--- /dev/null
+++ b/charts/dolphinscheduler/templates/svc-dolphinscheduler-api.yaml
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-api
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: 12345
+ targetPort: tcp-port
+ protocol: TCP
+ name: tcp-port
+ selector:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-api
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: api
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml b/charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml
new file mode 100644
index 0000000000..60d0d6e7b5
--- /dev/null
+++ b/charts/dolphinscheduler/templates/svc-dolphinscheduler-frontend.yaml
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ ports:
+ - port: 8888
+ targetPort: tcp-port
+ protocol: TCP
+ name: tcp-port
+ selector:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-frontend
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: frontend
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml b/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml
new file mode 100644
index 0000000000..7aaf0b4353
--- /dev/null
+++ b/charts/dolphinscheduler/templates/svc-dolphinscheduler-master-headless.yaml
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-master-headless
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master-headless
+ app.kubernetes.io/instance: {{ .Release.Name }}-master-headless
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ clusterIP: "None"
+ ports:
+ - port: 8888
+      targetPort: unused-tcp-port
+ protocol: TCP
+ name: unused-tcp-port
+ selector:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-master
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: master
\ No newline at end of file
diff --git a/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml b/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
new file mode 100644
index 0000000000..3e92a349d4
--- /dev/null
+++ b/charts/dolphinscheduler/templates/svc-dolphinscheduler-worker-headless.yaml
@@ -0,0 +1,36 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
+ labels:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker-headless
+ app.kubernetes.io/instance: {{ .Release.Name }}-worker-headless
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+spec:
+ clusterIP: "None"
+ ports:
+ - port: 50051
+ targetPort: logs-port
+ protocol: TCP
+ name: logs-port
+ selector:
+ app.kubernetes.io/name: {{ include "dolphinscheduler.fullname" . }}-worker
+ app.kubernetes.io/instance: {{ .Release.Name }}
+ app.kubernetes.io/managed-by: {{ .Release.Service }}
+ app.kubernetes.io/component: worker
\ No newline at end of file
diff --git a/charts/dolphinscheduler/values.yaml b/charts/dolphinscheduler/values.yaml
new file mode 100644
index 0000000000..962a031a0c
--- /dev/null
+++ b/charts/dolphinscheduler/values.yaml
@@ -0,0 +1,355 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Default values for dolphinscheduler-chart.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+nameOverride: ""
+fullnameOverride: ""
+
+timezone: "Asia/Shanghai"
+
+image:
+ registry: "docker.io"
+ repository: "dolphinscheduler"
+ tag: "1.2.1"
+ pullPolicy: "IfNotPresent"
+
+imagePullSecrets: []
+
+# If no external PostgreSQL is configured, DolphinScheduler uses this bundled PostgreSQL chart by default.
+postgresql:
+ enabled: true
+ postgresqlUsername: "root"
+ postgresqlPassword: "root"
+ postgresqlDatabase: "dolphinscheduler"
+ persistence:
+ enabled: false
+ size: "20Gi"
+ storageClass: "-"
+
+# To use an external PostgreSQL instead, set postgresql.enabled above to false.
+# When postgresql.enabled is false, DolphinScheduler connects to the database described here.
+externalDatabase:
+ host: "localhost"
+ port: "5432"
+ username: "root"
+ password: "root"
+ database: "dolphinscheduler"
+
+# If no external ZooKeeper is configured, DolphinScheduler uses this bundled ZooKeeper chart by default.
+zookeeper:
+ enabled: true
+ taskQueue: "zookeeper"
+ persistence:
+ enabled: false
+ size: "20Gi"
+ storageClass: "-"
+
+# To use an external ZooKeeper instead, set zookeeper.enabled above to false.
+# When zookeeper.enabled is false, DolphinScheduler connects to the quorum described here.
+externalZookeeper:
+ taskQueue: "zookeeper"
+ zookeeperQuorum: "127.0.0.1:2181"
+
+master:
+ podManagementPolicy: "Parallel"
+ replicas: "3"
+ # NodeSelector is a selector which must be true for the pod to fit on a node.
+ # Selector which must match a node's labels for the pod to be scheduled on that node.
+ # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ nodeSelector: {}
+ # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
+ # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
+ tolerations: []
+ # Affinity is a group of affinity scheduling rules.
+ # If specified, the pod's scheduling constraints.
+ # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
+ affinity: {}
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ configmap:
+ MASTER_EXEC_THREADS: "100"
+ MASTER_EXEC_TASK_NUM: "20"
+ MASTER_HEARTBEAT_INTERVAL: "10"
+ MASTER_TASK_COMMIT_RETRYTIMES: "5"
+ MASTER_TASK_COMMIT_INTERVAL: "1000"
+ MASTER_MAX_CPULOAD_AVG: "100"
+ MASTER_RESERVED_MEMORY: "0.1"
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
+ ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
+ ## A claim in this list takes precedence over any volumes in the template, with the same name.
+ persistentVolumeClaim:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "-"
+ storage: "20Gi"
+
+worker:
+ podManagementPolicy: "Parallel"
+ replicas: "3"
+ # NodeSelector is a selector which must be true for the pod to fit on a node.
+ # Selector which must match a node's labels for the pod to be scheduled on that node.
+ # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ nodeSelector: {}
+ # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
+ # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
+ tolerations: []
+ # Affinity is a group of affinity scheduling rules.
+ # If specified, the pod's scheduling constraints.
+ # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
+ affinity: {}
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ configmap:
+ WORKER_EXEC_THREADS: "100"
+ WORKER_HEARTBEAT_INTERVAL: "10"
+ WORKER_FETCH_TASK_NUM: "3"
+ WORKER_MAX_CPULOAD_AVG: "100"
+ WORKER_RESERVED_MEMORY: "0.1"
+ DOLPHINSCHEDULER_DATA_BASEDIR_PATH: "/tmp/dolphinscheduler"
+ DOLPHINSCHEDULER_ENV:
+ - "export HADOOP_HOME=/opt/soft/hadoop"
+ - "export HADOOP_CONF_DIR=/opt/soft/hadoop/etc/hadoop"
+ - "export SPARK_HOME1=/opt/soft/spark1"
+ - "export SPARK_HOME2=/opt/soft/spark2"
+ - "export PYTHON_HOME=/opt/soft/python"
+ - "export JAVA_HOME=/opt/soft/java"
+ - "export HIVE_HOME=/opt/soft/hive"
+ - "export FLINK_HOME=/opt/soft/flink"
+ - "export PATH=$HADOOP_HOME/bin:$SPARK_HOME1/bin:$SPARK_HOME2/bin:$PYTHON_HOME:$JAVA_HOME/bin:$HIVE_HOME/bin:$FLINK_HOME/bin:$PATH"
+ ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
+ ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
+ ## A claim in this list takes precedence over any volumes in the template, with the same name.
+ persistentVolumeClaim:
+ enabled: false
+ ## dolphinscheduler data volume
+ dataPersistentVolume:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "-"
+ storage: "20Gi"
+ ## dolphinscheduler logs volume
+ logsPersistentVolume:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "-"
+ storage: "20Gi"
+
+alert:
+ strategy:
+ type: "RollingUpdate"
+ rollingUpdate:
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ replicas: "1"
+ # NodeSelector is a selector which must be true for the pod to fit on a node.
+ # Selector which must match a node's labels for the pod to be scheduled on that node.
+ # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ nodeSelector: {}
+ # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
+ # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
+ tolerations: []
+ # Affinity is a group of affinity scheduling rules.
+ # If specified, the pod's scheduling constraints.
+ # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
+ affinity: {}
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ configmap:
+ XLS_FILE_PATH: "/tmp/xls"
+ MAIL_SERVER_HOST: ""
+ MAIL_SERVER_PORT: ""
+ MAIL_SENDER: ""
+ MAIL_USER: ""
+ MAIL_PASSWD: ""
+ MAIL_SMTP_STARTTLS_ENABLE: false
+ MAIL_SMTP_SSL_ENABLE: false
+ MAIL_SMTP_SSL_TRUST: ""
+ ENTERPRISE_WECHAT_ENABLE: false
+ ENTERPRISE_WECHAT_CORP_ID: ""
+ ENTERPRISE_WECHAT_SECRET: ""
+ ENTERPRISE_WECHAT_AGENT_ID: ""
+ ENTERPRISE_WECHAT_USERS: ""
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
+ ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
+ ## A claim in this list takes precedence over any volumes in the template, with the same name.
+ persistentVolumeClaim:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "-"
+ storage: "20Gi"
+
+api:
+ strategy:
+ type: "RollingUpdate"
+ rollingUpdate:
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ replicas: "1"
+ # NodeSelector is a selector which must be true for the pod to fit on a node.
+ # Selector which must match a node's labels for the pod to be scheduled on that node.
+ # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ nodeSelector: {}
+ # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
+ # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
+ tolerations: []
+ # Affinity is a group of affinity scheduling rules.
+ # If specified, the pod's scheduling constraints.
+ # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
+ affinity: {}
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
+ ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
+ ## A claim in this list takes precedence over any volumes in the template, with the same name.
+ persistentVolumeClaim:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "-"
+ storage: "20Gi"
+
+frontend:
+ strategy:
+ type: "RollingUpdate"
+ rollingUpdate:
+ maxSurge: "25%"
+ maxUnavailable: "25%"
+ replicas: "1"
+ # NodeSelector is a selector which must be true for the pod to fit on a node.
+ # Selector which must match a node's labels for the pod to be scheduled on that node.
+ # More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ nodeSelector: {}
+ # Tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission,
+ # effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.
+ tolerations: []
+ # Affinity is a group of affinity scheduling rules.
+ # If specified, the pod's scheduling constraints.
+ # More info: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#affinity-v1-core
+ affinity: {}
+ ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ livenessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated.
+ ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+ readinessProbe:
+ enabled: true
+ initialDelaySeconds: "30"
+ periodSeconds: "30"
+ timeoutSeconds: "5"
+ failureThreshold: "3"
+ successThreshold: "1"
+ ## volumeClaimTemplates is a list of claims that pods are allowed to reference.
+ ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod.
+ ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template.
+ ## A claim in this list takes precedence over any volumes in the template, with the same name.
+ persistentVolumeClaim:
+ enabled: false
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "-"
+ storage: "20Gi"
+
+ingress:
+ enabled: false
+ host: "dolphinscheduler.org"
+ path: "/"
+ tls:
+ enabled: false
+ hosts:
+ - "dolphinscheduler.org"
+ secretName: "dolphinscheduler-tls"
\ No newline at end of file
From 6cf7e6c1157749c184c2fd00d64d6cd30811746b Mon Sep 17 00:00:00 2001
From: liwenhe1993 <32166572+liwenhe1993@users.noreply.github.com>
Date: Sat, 28 Mar 2020 22:07:49 +0800
Subject: [PATCH 38/58] Remove .helmignore file (#2328)
---
charts/dolphinscheduler/.helmignore | 23 -----------------------
1 file changed, 23 deletions(-)
delete mode 100644 charts/dolphinscheduler/.helmignore
diff --git a/charts/dolphinscheduler/.helmignore b/charts/dolphinscheduler/.helmignore
deleted file mode 100644
index 0e8a0eb36f..0000000000
--- a/charts/dolphinscheduler/.helmignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Patterns to ignore when building packages.
-# This supports shell glob matching, relative path matching, and
-# negation (prefixed with !). Only one pattern per line.
-.DS_Store
-# Common VCS dirs
-.git/
-.gitignore
-.bzr/
-.bzrignore
-.hg/
-.hgignore
-.svn/
-# Common backup files
-*.swp
-*.bak
-*.tmp
-*.orig
-*~
-# Various IDEs
-.project
-.idea/
-*.tmproj
-.vscode/
From 526e5c91f112398bf920d420a7043477b442aa87 Mon Sep 17 00:00:00 2001
From: liwenhe1993 <32166572+liwenhe1993@users.noreply.github.com>
Date: Sat, 28 Mar 2020 23:28:35 +0800
Subject: [PATCH 39/58] Connection mode of adding Sid to Oracle (#2254)
* Connection mode of adding Sid to Oracle
* Remove code
* Add asf
* Add unit test
* Add unit test
* Add unit test
* solve the conflict
---
.../api/controller/DataSourceController.java | 33 ++++++++------
.../api/service/DataSourceService.java | 32 +++++++++-----
.../controller/DataSourceControllerTest.java | 4 ++
.../dolphinscheduler/common/Constants.java | 3 +-
.../common/enums/DbConnectType.java | 44 +++++++++++++++++++
.../dao/datasource/OracleDataSource.java | 15 +++++++
.../pages/list/_source/createDataSource.vue | 24 +++++++++-
.../src/js/module/i18n/locale/en_US.js | 3 ++
.../src/js/module/i18n/locale/zh_CN.js | 3 ++
pom.xml | 1 +
10 files changed, 137 insertions(+), 25 deletions(-)
create mode 100644 dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
index 881c93f2f7..89e6134609 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/controller/DataSourceController.java
@@ -16,18 +16,19 @@
*/
package org.apache.dolphinscheduler.api.controller;
+import io.swagger.annotations.Api;
+import io.swagger.annotations.ApiImplicitParam;
+import io.swagger.annotations.ApiImplicitParams;
+import io.swagger.annotations.ApiOperation;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.service.DataSourceService;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
+import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.ParameterUtils;
import org.apache.dolphinscheduler.dao.entity.User;
-import io.swagger.annotations.Api;
-import io.swagger.annotations.ApiImplicitParam;
-import io.swagger.annotations.ApiImplicitParams;
-import io.swagger.annotations.ApiOperation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
@@ -76,6 +77,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
+ @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/create")
@@ -90,11 +92,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
+ @RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
- logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {},port: {},database : {},principal: {},userName : {} other: {}",
- loginUser.getUserName(), name, note, type, host,port,database,principal,userName,other);
+ logger.info("login user {} create datasource name: {}, note: {}, type: {}, host: {}, port: {}, database : {}, principal: {}, userName : {}, connectType: {}, other: {}",
+ loginUser.getUserName(), name, note, type, host, port, database, principal, userName, connectType, other);
try {
- String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
+ String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Map result = dataSourceService.createDataSource(loginUser, name, note, type, parameter);
return returnDataList(result);
@@ -133,6 +136,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
+ @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/update")
@@ -148,11 +152,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
+ @RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
- logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, other: {}",
- loginUser.getUserName(), name, note, type, other);
+ logger.info("login user {} updateProcessInstance datasource name: {}, note: {}, type: {}, connectType: {}, other: {}",
+ loginUser.getUserName(), name, note, type, connectType, other);
try {
- String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, other);
+ String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal, userName, password, connectType, other);
Map dataSource = dataSourceService.updateDataSource(id, loginUser, name, note, type, parameter);
return returnDataList(dataSource);
} catch (Exception e) {
@@ -277,6 +282,7 @@ public class DataSourceController extends BaseController {
@ApiImplicitParam(name = "database", value = "DATABASE_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "userName", value = "USER_NAME",required = true, dataType ="String"),
@ApiImplicitParam(name = "password", value = "PASSWORD", dataType ="String"),
+ @ApiImplicitParam(name = "connectType", value = "CONNECT_TYPE", dataType = "DbConnectType"),
@ApiImplicitParam(name = "other", value = "DATA_SOURCE_OTHER", dataType ="String")
})
@PostMapping(value = "/connect")
@@ -291,11 +297,12 @@ public class DataSourceController extends BaseController {
@RequestParam(value = "principal") String principal,
@RequestParam(value = "userName") String userName,
@RequestParam(value = "password") String password,
+ @RequestParam(value = "connectType") DbConnectType connectType,
@RequestParam(value = "other") String other) {
- logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, other: {}",
- loginUser.getUserName(), name, note, type, other);
+ logger.info("login user {}, connect datasource: {} failure, note: {}, type: {}, connectType: {}, other: {}",
+ loginUser.getUserName(), name, note, type, connectType, other);
try {
- String parameter = dataSourceService.buildParameter(name, note, type, host, port, database,principal,userName, password, other);
+ String parameter = dataSourceService.buildParameter(name, note, type, host, port, database, principal, userName, password, connectType, other);
Boolean isConnection = dataSourceService.checkConnection(type, parameter);
Result result = new Result();
diff --git a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
index 6a732fed0e..afa13b7414 100644
--- a/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
+++ b/dolphinscheduler-api/src/main/java/org/apache/dolphinscheduler/api/service/DataSourceService.java
@@ -17,10 +17,15 @@
package org.apache.dolphinscheduler.api.service;
import com.alibaba.fastjson.JSON;
+import com.alibaba.fastjson.JSONObject;
+import com.alibaba.fastjson.TypeReference;
+import com.baomidou.mybatisplus.core.metadata.IPage;
+import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.dolphinscheduler.api.enums.Status;
import org.apache.dolphinscheduler.api.utils.PageInfo;
import org.apache.dolphinscheduler.api.utils.Result;
import org.apache.dolphinscheduler.common.Constants;
+import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
import org.apache.dolphinscheduler.common.utils.CommonUtils;
import org.apache.dolphinscheduler.common.utils.JSONUtils;
@@ -30,10 +35,6 @@ import org.apache.dolphinscheduler.dao.entity.Resource;
import org.apache.dolphinscheduler.dao.entity.User;
import org.apache.dolphinscheduler.dao.mapper.DataSourceMapper;
import org.apache.dolphinscheduler.dao.mapper.DataSourceUserMapper;
-import com.alibaba.fastjson.JSONObject;
-import com.alibaba.fastjson.TypeReference;
-import com.baomidou.mybatisplus.core.metadata.IPage;
-import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
@@ -473,12 +474,19 @@ public class DataSourceService extends BaseService{
* @return datasource parameter
*/
public String buildParameter(String name, String desc, DbType type, String host,
- String port, String database,String principal,String userName,
- String password, String other) {
+ String port, String database, String principal, String userName,
+ String password, DbConnectType connectType, String other) {
+
+ String address = buildAddress(type, host, port, connectType);
- String address = buildAddress(type, host, port);
+ String jdbcUrl;
+ if (Constants.ORACLE.equals(type.name())
+ && connectType == DbConnectType.ORACLE_SID) {
+ jdbcUrl = address + ":" + database;
+ } else {
+ jdbcUrl = address + "/" + database;
+ }
- String jdbcUrl = address + "/" + database;
if (CommonUtils.getKerberosStartupState() &&
(type == DbType.HIVE || type == DbType.SPARK)){
jdbcUrl += ";principal=" + principal;
@@ -531,7 +539,7 @@ public class DataSourceService extends BaseService{
}
- private String buildAddress(DbType type, String host, String port) {
+ private String buildAddress(DbType type, String host, String port, DbConnectType connectType) {
StringBuilder sb = new StringBuilder();
if (Constants.MYSQL.equals(type.name())) {
sb.append(Constants.JDBC_MYSQL);
@@ -552,7 +560,11 @@ public class DataSourceService extends BaseService{
sb.append(Constants.JDBC_CLICKHOUSE);
sb.append(host).append(":").append(port);
} else if (Constants.ORACLE.equals(type.name())) {
- sb.append(Constants.JDBC_ORACLE);
+ if (connectType == DbConnectType.ORACLE_SID) {
+ sb.append(Constants.JDBC_ORACLE_SID);
+ } else {
+ sb.append(Constants.JDBC_ORACLE_SERVICE_NAME);
+ }
sb.append(host).append(":").append(port);
} else if (Constants.SQLSERVER.equals(type.name())) {
sb.append(Constants.JDBC_SQLSERVER);
diff --git a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java
index f80ce8556e..5ed7310c47 100644
--- a/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java
+++ b/dolphinscheduler-api/src/test/java/org/apache/dolphinscheduler/api/controller/DataSourceControllerTest.java
@@ -39,6 +39,7 @@ import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.
* data source controller test
*/
public class DataSourceControllerTest extends AbstractControllerTest{
+
private static Logger logger = LoggerFactory.getLogger(DataSourceControllerTest.class);
@Ignore
@@ -95,6 +96,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
+ @Ignore
@Test
public void testQueryDataSource() throws Exception {
MultiValueMap paramsMap = new LinkedMultiValueMap<>();
@@ -169,6 +171,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
}
+ @Ignore
@Test
public void testConnectionTest() throws Exception {
MultiValueMap paramsMap = new LinkedMultiValueMap<>();
@@ -248,6 +251,7 @@ public class DataSourceControllerTest extends AbstractControllerTest{
+ @Ignore
@Test
public void testDelete() throws Exception {
MultiValueMap paramsMap = new LinkedMultiValueMap<>();
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
index 73655e7a9d..c46635c1e8 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/Constants.java
@@ -972,7 +972,8 @@ public final class Constants {
public static final String JDBC_POSTGRESQL = "jdbc:postgresql://";
public static final String JDBC_HIVE_2 = "jdbc:hive2://";
public static final String JDBC_CLICKHOUSE = "jdbc:clickhouse://";
- public static final String JDBC_ORACLE = "jdbc:oracle:thin:@//";
+ public static final String JDBC_ORACLE_SID = "jdbc:oracle:thin:@";
+ public static final String JDBC_ORACLE_SERVICE_NAME = "jdbc:oracle:thin:@//";
public static final String JDBC_SQLSERVER = "jdbc:sqlserver://";
public static final String JDBC_DB2 = "jdbc:db2://";
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java
new file mode 100644
index 0000000000..ef0f454ff6
--- /dev/null
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/enums/DbConnectType.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.dolphinscheduler.common.enums;
+
+import com.baomidou.mybatisplus.annotation.EnumValue;
+
+public enum DbConnectType {
+
+ ORACLE_SERVICE_NAME(0, "Oracle Service Name"),
+ ORACLE_SID(1, "Oracle SID");
+
+ DbConnectType(int code, String descp) {
+ this.code = code;
+ this.descp = descp;
+ }
+
+ @EnumValue
+ private final int code;
+
+ private final String descp;
+
+ public int getCode() {
+ return code;
+ }
+
+ public String getDescp() {
+ return descp;
+ }
+
+}
diff --git a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java
index 0ebecb49f7..879219c386 100644
--- a/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java
+++ b/dolphinscheduler-dao/src/main/java/org/apache/dolphinscheduler/dao/datasource/OracleDataSource.java
@@ -17,13 +17,28 @@
package org.apache.dolphinscheduler.dao.datasource;
import org.apache.dolphinscheduler.common.Constants;
+import org.apache.dolphinscheduler.common.enums.DbConnectType;
import org.apache.dolphinscheduler.common.enums.DbType;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* data source of Oracle
*/
public class OracleDataSource extends BaseDataSource {
+ private static final Logger logger = LoggerFactory.getLogger(OracleDataSource.class);
+
+ private DbConnectType type;
+
+ public DbConnectType getType() {
+ return type;
+ }
+
+ public void setType(DbConnectType type) {
+ this.type = type;
+ }
+
/**
* @return driver class
*/
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/_source/createDataSource.vue b/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/_source/createDataSource.vue
index 001535b8fb..db99d00a0c 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/_source/createDataSource.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/datasource/pages/list/_source/createDataSource.vue
@@ -128,6 +128,15 @@
+
+ * {{$t('Oracle Connect Type')}}
+
+
+ {{$t('Oracle Service Name')}}
+ {{$t('Oracle SID')}}
+
+
+
{{$t('jdbc connect parameters')}}
@@ -152,7 +161,7 @@
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
index ad33503532..f7639bb959 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
@@ -47,9 +47,9 @@
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
index ddb097ee0f..362a3f6727 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
@@ -128,7 +128,6 @@
import { downloadFile } from '@/module/download'
import { bytesToSize } from '@/module/util/util'
import localStore from '@/module/util/localStorage'
-
export default {
name: 'udf-manage-list',
data () {
@@ -215,4 +214,4 @@
},
components: { }
}
-
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue
index 69acdef4a5..359a16a29f 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue
@@ -48,9 +48,9 @@
+
\ No newline at end of file
From 2b17435337a11ee13d10103e9b27ef1f763f5f56 Mon Sep 17 00:00:00 2001
From: break60 <790061044@qq.com>
Date: Tue, 31 Mar 2020 11:11:50 +0800
Subject: [PATCH 46/58] Supplemental licenses and repair resources cannot be
renamed (#2340)
* Change DOM label
* Change name to lowercase
* Limit customization file content to no more than 3000 lines
* dd branch flow node verification
* datax
* datax add custom
* Change normalize.scss import method and animation.scss license modification
* Resource tree code merge
* Modify ans-ui version and timing management style
* Supplemental licenses and repair resources cannot be renamed
---
dolphinscheduler-dist/release-docs/LICENSE | 2 ++
.../licenses/ui-licenses/LICENSE-normalize | 8 ++++++++
.../ui-licenses/LICENSE-vue-treeselect | 20 +++++++++++++++++++
.../pages/file/pages/list/_source/list.vue | 3 +--
.../pages/file/pages/list/_source/rename.vue | 7 +++----
.../pages/udf/pages/resource/_source/list.vue | 3 +--
.../udf/pages/resource/_source/rename.vue | 6 +++---
7 files changed, 38 insertions(+), 11 deletions(-)
create mode 100644 dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-normalize
create mode 100644 dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-vue-treeselect
diff --git a/dolphinscheduler-dist/release-docs/LICENSE b/dolphinscheduler-dist/release-docs/LICENSE
index 97946d1172..82e641ec72 100644
--- a/dolphinscheduler-dist/release-docs/LICENSE
+++ b/dolphinscheduler-dist/release-docs/LICENSE
@@ -518,6 +518,8 @@ MIT licenses
js-cookie 2.2.1: https://github.com/js-cookie/js-cookie MIT
jsplumb 2.8.6: https://github.com/jsplumb/jsplumb MIT and GPLv2
lodash 4.17.11: https://github.com/lodash/lodash MIT
+ normalize.css 8.0.1: https://github.com/necolas/normalize.css MIT
+ vue-treeselect 0.4.0: https://github.com/riophae/vue-treeselect MIT
vue 2.5.17: https://github.com/vuejs/vue MIT
vue-router 2.7.0: https://github.com/vuejs/vue-router MIT
vuex 3.0.0: https://github.com/vuejs/vuex MIT
diff --git a/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-normalize b/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-normalize
new file mode 100644
index 0000000000..90e0c091a5
--- /dev/null
+++ b/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-normalize
@@ -0,0 +1,8 @@
+The MIT License (MIT)
+Copyright © Nicolas Gallagher and Jonathan Neal
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-vue-treeselect b/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-vue-treeselect
new file mode 100644
index 0000000000..f7d8cc3ebd
--- /dev/null
+++ b/dolphinscheduler-dist/release-docs/licenses/ui-licenses/LICENSE-vue-treeselect
@@ -0,0 +1,20 @@
+Copyright (c) 2017-present Riophae Lee
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue
index 3bc1bfac24..4ccfa2eff3 100755
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/list.vue
@@ -134,7 +134,6 @@
import { bytesToSize } from '@/module/util/util'
import { downloadFile } from '@/module/download'
import localStore from '@/module/util/localStorage'
-
export default {
name: 'file-manage-list',
data () {
@@ -238,4 +237,4 @@
},
components: { }
}
-
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
index ad33503532..f7639bb959 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/file/pages/list/_source/rename.vue
@@ -47,9 +47,9 @@
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
index ddb097ee0f..362a3f6727 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/list.vue
@@ -128,7 +128,6 @@
import { downloadFile } from '@/module/download'
import { bytesToSize } from '@/module/util/util'
import localStore from '@/module/util/localStorage'
-
export default {
name: 'udf-manage-list',
data () {
@@ -215,4 +214,4 @@
},
components: { }
}
-
+
\ No newline at end of file
diff --git a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue
index 69acdef4a5..359a16a29f 100644
--- a/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue
+++ b/dolphinscheduler-ui/src/js/conf/home/pages/resource/pages/udf/pages/resource/_source/rename.vue
@@ -48,9 +48,9 @@
+
\ No newline at end of file
From 718e4b15d2484f7d94d96b8110025a0fa07d86c4 Mon Sep 17 00:00:00 2001
From: "gabry.wu"
Date: Tue, 31 Mar 2020 12:13:34 +0800
Subject: [PATCH 47/58] Adapting partial code(file name start with O) to the
sonar cloud rule (#2259)
* Adapting partial code(file name start with O) to the sonar cloud rule
* resolve conflict with dev branch
Co-authored-by: dailidong
---
.../apache/dolphinscheduler/common/utils/OSUtils.java | 10 ++--------
.../apache/dolphinscheduler/common/os/OSUtilsTest.java | 2 +-
2 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java
index a0fea8d33c..4df09d1c15 100644
--- a/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java
+++ b/dolphinscheduler-common/src/main/java/org/apache/dolphinscheduler/common/utils/OSUtils.java
@@ -352,13 +352,7 @@ public class OSUtils {
return sb.toString();
} finally {
- if (br != null) {
- try {
- br.close();
- } catch (Exception e) {
- logger.error(e.getMessage(), e);
- }
- }
+ IOUtils.closeQuietly(br);
}
}
@@ -408,7 +402,7 @@ public class OSUtils {
* whether is windows
* @return true if windows
*/
- public static boolean isWindows() { ;
+ public static boolean isWindows() {
return getOSName().startsWith("Windows");
}
diff --git a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/os/OSUtilsTest.java b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/os/OSUtilsTest.java
index 2670eebc20..1815e48f84 100644
--- a/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/os/OSUtilsTest.java
+++ b/dolphinscheduler-common/src/test/java/org/apache/dolphinscheduler/common/os/OSUtilsTest.java
@@ -67,7 +67,7 @@ public class OSUtilsTest {
@Test
public void cpuUsage() throws Exception {
logger.info("cpuUsage : {}", OSUtils.cpuUsage());
- Thread.sleep(1000l);
+ Thread.sleep(1000L);
logger.info("cpuUsage : {}", OSUtils.cpuUsage());
double cpuUsage = OSUtils.cpuUsage();
From 4db2ac232a32d286164e1f33f030130d4696ed16 Mon Sep 17 00:00:00 2001
From: BoYiZhang <39816903+BoYiZhang@users.noreply.github.com>
Date: Tue, 31 Mar 2020 21:08:59 +0800
Subject: [PATCH 48/58] Remove invalid code (#2342)
Co-authored-by: zhanglong
Co-authored-by: dailidong