commit ae9791d500
|
|
@@ -17,7 +17,27 @@ jobs:
|
|||
with:
|
||||
access_token: ${{ github.token }}
|
||||
|
||||
build-npm:
|
||||
name: build-npm
|
||||
runs-on:
|
||||
- self-hosted
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- shell: bash
|
||||
name: npm install
|
||||
run: |
|
||||
ci/bin/build.sh
|
||||
- name: upload build
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
#path: node_modules/
|
||||
path: node_modules.tar.gz
|
||||
retention-days: 7
|
||||
|
||||
csi-sanity-synology:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
|
@ -28,6 +48,9 @@ jobs:
|
|||
- csi-sanity-synology
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -41,58 +64,103 @@ jobs:
|
|||
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
|
||||
|
||||
# api-based drivers
|
||||
csi-sanity-truenas-scale:
|
||||
csi-sanity-truenas-scale-22_02:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- truenas/scale-iscsi.yaml
|
||||
- truenas/scale-nfs.yaml
|
||||
- truenas/scale/22.02/scale-iscsi.yaml
|
||||
- truenas/scale/22.02/scale-nfs.yaml
|
||||
# 80 char limit
|
||||
#- truenas/scale-smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-truenas-scale
|
||||
- csi-sanity-zfs-local
|
||||
#- csi-sanity-truenas-scale
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_HOST }}
|
||||
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_SCALE_USERNAME }}
|
||||
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_SCALE_PASSWORD }}
|
||||
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_22_02_HOST }}
|
||||
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
|
||||
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
|
||||
|
||||
# ssh-based drivers
|
||||
csi-sanity-truenas-core:
|
||||
csi-sanity-truenas-core-12_0:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
# 63 char limit
|
||||
#- truenas/core-iscsi.yaml
|
||||
- truenas/core-nfs.yaml
|
||||
- truenas/core/12.0/core-nfs.yaml
|
||||
# 80 char limit
|
||||
#- truenas/core-smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-truenas-core
|
||||
- csi-sanity-zfs-local
|
||||
#- csi-sanity-truenas-core
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_CORE_HOST }}
|
||||
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_CORE_USERNAME }}
|
||||
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_CORE_PASSWORD }}
|
||||
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_CORE_12_0_HOST }}
|
||||
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
|
||||
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
|
||||
|
||||
# ssh-based drivers
|
||||
csi-sanity-truenas-core-13_0:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- truenas/core/13.0/core-iscsi.yaml
|
||||
- truenas/core/13.0/core-nfs.yaml
|
||||
# 80 char limit
|
||||
#- truenas/core-smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-zfs-local
|
||||
#- csi-sanity-truenas-core
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_CORE_13_0_HOST }}
|
||||
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
|
||||
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
|
||||
|
||||
# ssh-based drivers
|
||||
csi-sanity-zfs-generic:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
|
@ -104,6 +172,9 @@ jobs:
|
|||
- csi-sanity-zfs-generic
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -114,12 +185,64 @@ jobs:
|
|||
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
|
||||
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
|
||||
|
||||
build:
|
||||
# zfs-local drivers
|
||||
csi-sanity-zfs-local:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- zfs-local/zvol.yaml
|
||||
- zfs-local/dataset.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-zfs-local
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
|
||||
# local-hostpath driver
|
||||
csi-sanity-local-hostpath:
|
||||
needs:
|
||||
- build-npm
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- local-hostpath/basic.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
|
||||
|
||||
build-docker:
|
||||
needs:
|
||||
- csi-sanity-synology
|
||||
- csi-sanity-truenas-scale
|
||||
- csi-sanity-truenas-core
|
||||
- csi-sanity-truenas-scale-22_02
|
||||
- csi-sanity-truenas-core-12_0
|
||||
- csi-sanity-truenas-core-13_0
|
||||
- csi-sanity-zfs-generic
|
||||
- csi-sanity-zfs-local
|
||||
- csi-sanity-local-hostpath
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
|
|
|||
CHANGELOG.md
|
|
@@ -1,6 +1,21 @@
|
|||
# v1.5.0
|
||||
|
||||
Released 2022-02-23
|
||||
|
||||
- massive ci overhaul
|
||||
- add additional drivers
|
||||
- add additional TrueNAS server versions
|
||||
- only build `node_modules` once by using artifacts
|
||||
- support allow/block listing specific tests
|
||||
- better logic waiting for driver socket to appear
|
||||
- introduce `zfs-local-dataset` driver
|
||||
- introduce `zfs-local-zvol` driver
|
||||
- introduce `local-hostpath` driver
|
||||
- support manually provisioned (`node-manual`) `oneclient` volumes
|
||||
|
||||
# v1.4.4
|
||||
|
||||
Release 2021-12-11
|
||||
Released 2021-12-11
|
||||
|
||||
- better adherence to expected csi behavior when volume request for less than
|
||||
minimum volume size is requested (see #137)
|
||||
|
|
|
|||
|
|
@ -97,6 +97,13 @@ RUN chmod +x /usr/local/bin/mount
|
|||
ADD docker/umount /usr/local/bin/umount
|
||||
RUN chmod +x /usr/local/bin/umount
|
||||
|
||||
ADD docker/zfs /usr/local/bin/zfs
|
||||
RUN chmod +x /usr/local/bin/zfs
|
||||
ADD docker/zpool /usr/local/bin/zpool
|
||||
RUN chmod +x /usr/local/bin/zpool
|
||||
ADD docker/oneclient /usr/local/bin/oneclient
|
||||
RUN chmod +x /usr/local/bin/oneclient
|
||||
|
||||
# Run as a non-root user
|
||||
RUN useradd --create-home csi \
|
||||
&& chown -R csi: /home/csi
|
||||
|
|
|
|||
README.md
|
|
@@ -24,6 +24,8 @@ have access to resizing, snapshots, clones, etc functionality.
|
|||
- `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu)
|
||||
- `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu)
|
||||
- `zfs-local-ephemeral-inline` (provisions node-local zfs datasets)
|
||||
- `zfs-local-dataset` (provision node-local volume as dataset)
|
||||
- `zfs-local-zvol` (provision node-local volume as zvol)
|
||||
- `synology-iscsi` experimental (manages volumes to share over iscsi)
|
||||
- `lustre-client` (crudely provisions storage using a shared lustre
|
||||
share/directory for all volumes)
|
||||
|
|
@@ -31,8 +33,9 @@ have access to resizing, snapshots, clones, etc functionality.
|
|||
for all volumes)
|
||||
- `smb-client` (crudely provisions storage using a shared smb share/directory
|
||||
for all volumes)
|
||||
- `node-manual` (allows connecting to manually created smb, nfs, lustre, and
|
||||
iscsi volumes, see sample PVs in the `examples` directory)
|
||||
- `local-hostpath` (crudely provisions node-local directories)
|
||||
- `node-manual` (allows connecting to manually created smb, nfs, lustre,
|
||||
oneclient, and iscsi volumes, see sample PVs in the `examples` directory)
|
||||
- framework for developing `csi` drivers
|
||||
|
||||
If you have any interest in providing a `csi` driver, simply open an issue to
|
||||
|
|
@@ -51,6 +54,7 @@ Predominantly 3 things are needed:
|
|||
## Community Guides
|
||||
|
||||
- https://jonathangazeley.com/2021/01/05/using-truenas-to-provide-persistent-storage-for-kubernetes/
|
||||
- https://www.lisenet.com/2021/moving-to-truenas-and-democratic-csi-for-kubernetes-persistent-storage/
|
||||
- https://gist.github.com/admun/4372899f20421a947b7544e5fc9f9117 (migrating
|
||||
from `nfs-client-provisioner` to `democratic-csi`)
|
||||
- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc
|
||||
|
|
@@ -141,6 +145,26 @@ necessary.
|
|||
- https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
|
||||
- https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
|
||||
|
||||
### zfs-local-{dataset,zvol}
|
||||
|
||||
This `driver` provisions node-local storage. Each node should have an
|
||||
identically named zfs pool created and available to the `driver`. Note, this is
|
||||
_NOT_ the same thing as using the docker zfs storage driver (although the same
|
||||
pool could be used). Nodes should have the standard `zfs` utilities installed.
|
||||
|
||||
In the name of ease-of-use these drivers by default report `MULTI_NODE` support
|
||||
(`ReadWriteMany` in k8s); however, the volumes will implicitly only work on the
|
||||
node where originally provisioned. Topology constraints manage this in an
|
||||
automated fashion preventing any undesirable behavior. So while you may
|
||||
provision `MULTI_NODE` / `RWX` volumes, any workloads using the volume will
|
||||
always land on a single node and that node will always be the node where the
|
||||
volume is/was provisioned.
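A minimal sketch of what this means in practice (illustrative only; the topology key below matches the `NODE_TOPOLOGY_KEY_NAME` constant introduced elsewhere in this change, and the node id is a hypothetical example):

```js
// Illustrative only: the accessible_topology reported for a zfs-local volume,
// pinning scheduling to the node that provisioned it.
const accessible_topology = [
  {
    segments: {
      "org.democratic-csi.topology/node": "worker-01", // hypothetical node id
    },
  },
];
```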
|
||||
|
||||
### local-hostpath
|
||||
|
||||
This `driver` provisions node-local storage. Each node should have an
|
||||
identically named folder where volumes will be created.
|
||||
|
||||
## Server Prep
|
||||
|
||||
Server preparation depends slightly on which `driver` you are using.
|
||||
|
|
@@ -371,3 +395,4 @@ A special shout out to the wonderful sponsors of the project!
|
|||
- https://datamattsson.tumblr.com/post/624751011659202560/welcome-truenas-core-container-storage-provider
|
||||
- https://github.com/dravanet/truenas-csi
|
||||
- https://github.com/SynologyOpenSource/synology-csi
|
||||
- https://github.com/openebs/zfs-localpv
|
||||
|
|
|
|||
|
|
@ -0,0 +1,11 @@
|
|||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
set -x
|
||||
|
||||
export PATH="/usr/local/lib/nodejs/bin:${PATH}"
|
||||
# install deps
|
||||
npm i
|
||||
|
||||
# tar node_modules to keep the number of files low to upload
|
||||
tar -zcf node_modules.tar.gz node_modules
|
||||
|
|
@ -34,6 +34,8 @@ csi-sanity --csi.endpoint "unix://${CSI_ENDPOINT}" \
|
|||
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
|
||||
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
|
||||
--csi.testvolumeexpandsize 2147483648 \
|
||||
--csi.testvolumesize 1073741824
|
||||
--csi.testvolumesize 1073741824 \
|
||||
-ginkgo.skip "${CSI_SANITY_SKIP}" \
|
||||
-ginkgo.focus "${CSI_SANITY_FOCUS}"
|
||||
|
||||
rm -rf "${CSI_SANITY_TEMP_DIR}"
|
||||
|
|
|
|||
|
|
@ -13,7 +13,11 @@ trap _term EXIT
|
|||
|
||||
export PATH="/usr/local/lib/nodejs/bin:${PATH}"
|
||||
# install deps
|
||||
npm i
|
||||
#npm i
|
||||
# install from artifacts
|
||||
if [[ -f "node_modules.tar.gz" ]];then
|
||||
tar -zxf node_modules.tar.gz
|
||||
fi
|
||||
|
||||
# generate key for paths etc
|
||||
export CI_BUILD_KEY=$(uuidgen | cut -d "-" -f 1)
|
||||
|
|
@ -23,7 +27,20 @@ sudo -E ci/bin/launch-server.sh &
|
|||
SUDO_PID=$!
|
||||
|
||||
# wait for server to launch
|
||||
sleep 10
|
||||
#sleep 10
|
||||
|
||||
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
|
||||
iter=0
|
||||
max_iter=60
|
||||
while [ ! -S "${CSI_ENDPOINT}" ];do
|
||||
((++iter))
|
||||
echo "waiting for ${CSI_ENDPOINT} to appear"
|
||||
sleep 1
|
||||
if [[ $iter -gt $max_iter ]];then
|
||||
echo "${CSI_ENDPOINT} failed to appear"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
# launch csi-sanity
|
||||
sudo -E ci/bin/launch-csi-sanity.sh
|
||||
|
|
|
|||
|
|
@ -0,0 +1,8 @@
|
|||
driver: local-hostpath
|
||||
instance_id:
|
||||
local-hostpath:
|
||||
shareBasePath: "/tmp/local-hostpath/${CI_BUILD_KEY}/controller"
|
||||
controllerBasePath: "/tmp/local-hostpath/${CI_BUILD_KEY}/controller"
|
||||
dirPermissionsMode: "0777"
|
||||
dirPermissionsUser: root
|
||||
dirPermissionsGroup: root
|
||||
|
|
@ -0,0 +1,37 @@
|
|||
driver: freenas-iscsi
|
||||
|
||||
httpConnection:
|
||||
protocol: http
|
||||
host: ${TRUENAS_HOST}
|
||||
port: 80
|
||||
#apiKey:
|
||||
username: ${TRUENAS_USERNAME}
|
||||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
sshConnection:
|
||||
host: ${TRUENAS_HOST}
|
||||
port: 22
|
||||
username: ${TRUENAS_USERNAME}
|
||||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
zvolCompression:
|
||||
zvolDedup:
|
||||
zvolEnableReservation: false
|
||||
zvolBlocksize:
|
||||
|
||||
iscsi:
|
||||
targetPortal: ${TRUENAS_HOST}
|
||||
interface: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
nameSuffix: ""
|
||||
targetGroups:
|
||||
- targetGroupPortalGroup: 1
|
||||
targetGroupInitiatorGroup: 1
|
||||
targetGroupAuthType: None
|
||||
targetGroupAuthGroup:
|
||||
# 0-100 (0 == ignore)
|
||||
extentAvailThreshold: 0
|
||||
|
|
@ -0,0 +1,35 @@
|
|||
driver: freenas-nfs
|
||||
|
||||
httpConnection:
|
||||
protocol: http
|
||||
host: ${TRUENAS_HOST}
|
||||
port: 80
|
||||
#apiKey:
|
||||
username: ${TRUENAS_USERNAME}
|
||||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
sshConnection:
|
||||
host: ${TRUENAS_HOST}
|
||||
port: 22
|
||||
username: ${TRUENAS_USERNAME}
|
||||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
||||
nfs:
|
||||
shareHost: ${TRUENAS_HOST}
|
||||
shareAlldirs: false
|
||||
shareAllowedHosts: []
|
||||
shareAllowedNetworks: []
|
||||
shareMaprootUser: root
|
||||
shareMaprootGroup: wheel
|
||||
shareMapallUser: ""
|
||||
shareMapallGroup: ""
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
driver: freenas-smb
|
||||
|
||||
httpConnection:
|
||||
protocol: http
|
||||
host: ${TRUENAS_HOST}
|
||||
port: 80
|
||||
#apiKey:
|
||||
username: ${TRUENAS_USERNAME}
|
||||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
sshConnection:
|
||||
host: ${TRUENAS_HOST}
|
||||
port: 22
|
||||
username: ${TRUENAS_USERNAME}
|
||||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetProperties:
|
||||
# smb options
|
||||
#aclmode: restricted
|
||||
#casesensitivity: mixed
|
||||
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
||||
# for smb with guest
|
||||
#datasetPermissionsUser: nobody
|
||||
#datasetPermissionsGroup: nobody
|
||||
|
||||
#datasetPermissionsGroup: root
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m u:kube:full_set:allow"
|
||||
|
||||
smb:
|
||||
shareHost: ${TRUENAS_HOST}
|
||||
#nameTemplate: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
nameSuffix: ""
|
||||
shareAuxiliaryConfigurationTemplate: |
|
||||
#guest ok = yes
|
||||
#guest only = yes
|
||||
shareHome: false
|
||||
shareAllowedHosts: []
|
||||
shareDeniedHosts: []
|
||||
#shareDefaultPermissions: true
|
||||
shareGuestOk: true
|
||||
#shareGuestOnly: true
|
||||
#shareShowHiddenFiles: true
|
||||
shareRecycleBin: true
|
||||
shareBrowsable: false
|
||||
shareAccessBasedEnumeration: true
|
||||
shareTimeMachine: false
|
||||
#shareStorageTask:
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
driver: zfs-local-dataset
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
driver: zfs-local-zvol
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
zvolCompression:
|
||||
zvolDedup:
|
||||
zvolEnableReservation: false
|
||||
zvolBlocksize:
|
||||
docker/mount
|
|
@ -1,5 +1,32 @@
|
|||
#!/bin/bash
|
||||
|
||||
container_supported_filesystems=(
|
||||
"ext2"
|
||||
"ext3"
|
||||
"ext4"
|
||||
"ext4dev"
|
||||
"xfs"
|
||||
"vfat"
|
||||
"nfs"
|
||||
"nfs3"
|
||||
"nfs4"
|
||||
"cifs"
|
||||
"smb"
|
||||
"smb3"
|
||||
"bind"
|
||||
)
|
||||
|
||||
while getopts "t:" opt; do
|
||||
case "$opt" in
|
||||
t)
|
||||
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}"
|
||||
else
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" oneclient "${@:1}"
|
||||
|
|
@ -1,5 +1,32 @@
|
|||
#!/bin/bash
|
||||
|
||||
container_supported_filesystems=(
|
||||
"ext2"
|
||||
"ext3"
|
||||
"ext4"
|
||||
"ext4dev"
|
||||
"xfs"
|
||||
"vfat"
|
||||
"nfs"
|
||||
"nfs3"
|
||||
"nfs4"
|
||||
"cifs"
|
||||
"smb"
|
||||
"smb3"
|
||||
"bind"
|
||||
)
|
||||
|
||||
while getopts "t:" opt; do
|
||||
case "$opt" in
|
||||
t)
|
||||
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" umount "${@:1}"
|
||||
else
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" zfs "${@:1}"
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" zpool "${@:1}"
|
||||
|
|
@ -0,0 +1,10 @@
|
|||
driver: local-hostpath
|
||||
instance_id:
|
||||
local-hostpath:
|
||||
# generally shareBasePath and controllerBasePath should be the same for this
|
||||
# driver, this path should be mounted into the csi-driver container
|
||||
shareBasePath: "/var/lib/csi-local-hostpath"
|
||||
controllerBasePath: "/var/lib/csi-local-hostpath"
|
||||
dirPermissionsMode: "0777"
|
||||
dirPermissionsUser: root
|
||||
dirPermissionsGroup: root
|
||||
|
|
@ -9,10 +9,7 @@ sshConnection:
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
...
|
||||
-----END RSA PRIVATE KEY-----
|
||||
service:
|
||||
identity: {}
|
||||
controller: {}
|
||||
node: {}
|
||||
|
||||
zfs:
|
||||
# can be used to override defaults if necessary
|
||||
# the example below is useful for TrueNAS 12
|
||||
|
|
|
|||
|
|
@ -9,10 +9,7 @@ sshConnection:
|
|||
-----BEGIN RSA PRIVATE KEY-----
|
||||
...
|
||||
-----END RSA PRIVATE KEY-----
|
||||
service:
|
||||
identity: {}
|
||||
controller: {}
|
||||
node: {}
|
||||
|
||||
zfs:
|
||||
# can be used to override defaults if necessary
|
||||
# the example below is useful for TrueNAS 12
|
||||
|
|
|
|||
|
|
@ -0,0 +1,11 @@
|
|||
driver: zfs-local-dataset
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/k8s/local/v
|
||||
detachedSnapshotsDatasetParentName: tank/k8s/local/s
|
||||
|
||||
datasetProperties:
|
||||
# key: value
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: false
|
||||
|
|
@ -1,8 +1,4 @@
|
|||
driver: zfs-local-ephemeral-inline
|
||||
service:
|
||||
identity: {}
|
||||
controller: {}
|
||||
node: {}
|
||||
zfs:
|
||||
#chroot: "/host"
|
||||
datasetParentName: tank/k8s/inline
|
||||
|
|
|
|||
|
|
@ -0,0 +1,13 @@
|
|||
driver: zfs-local-zvol
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/k8s/local/v
|
||||
detachedSnapshotsDatasetParentName: tank/k8s/local/s
|
||||
|
||||
datasetProperties:
|
||||
# key: value
|
||||
|
||||
zvolCompression:
|
||||
zvolDedup:
|
||||
zvolEnableReservation: false
|
||||
zvolBlocksize:
|
||||
File diff suppressed because it is too large
package.json
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "democratic-csi",
|
||||
"version": "1.4.3",
|
||||
"version": "1.5.0",
|
||||
"description": "kubernetes csi driver framework",
|
||||
"main": "bin/democratic-csi",
|
||||
"scripts": {
|
||||
|
|
@ -18,26 +18,26 @@
|
|||
"url": "https://github.com/democratic-csi/democratic-csi.git"
|
||||
},
|
||||
"dependencies": {
|
||||
"@grpc/grpc-js": "^1.3.6",
|
||||
"@grpc/grpc-js": "^1.5.5",
|
||||
"@grpc/proto-loader": "^0.6.0",
|
||||
"@kubernetes/client-node": "^0.16.1",
|
||||
"@kubernetes/client-node": "^0.16.3",
|
||||
"async-mutex": "^0.3.1",
|
||||
"bunyan": "^1.8.15",
|
||||
"grpc-uds": "^0.1.6",
|
||||
"handlebars": "^4.7.7",
|
||||
"js-yaml": "^4.0.0",
|
||||
"lodash": "^4.17.21",
|
||||
"lru-cache": "^6.0.0",
|
||||
"prompt": "^1.2.0",
|
||||
"lru-cache": "^7.4.0",
|
||||
"prompt": "^1.2.2",
|
||||
"request": "^2.88.2",
|
||||
"semver": "^7.3.4",
|
||||
"ssh2": "^1.1.0",
|
||||
"uri-js": "^4.4.1",
|
||||
"uuid": "^8.3.2",
|
||||
"winston": "^3.3.3",
|
||||
"winston": "^3.6.0",
|
||||
"yargs": "^17.0.1"
|
||||
},
|
||||
"devDependencies": {
|
||||
"eslint": "^8.1.0"
|
||||
"eslint": "^8.9.0"
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
|
|||
const _ = require("lodash");
|
||||
const { CsiBaseDriver } = require("../index");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const cp = require("child_process");
|
||||
|
|
@ -41,7 +42,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
|
||||
options.service.identity.capabilities.volume_expansion = [
|
||||
//"UNKNOWN",
|
||||
"ONLINE",
|
||||
//"ONLINE",
|
||||
//"OFFLINE"
|
||||
];
|
||||
}
|
||||
|
|
@ -514,6 +515,11 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
driver.options.instance_id;
|
||||
}
|
||||
|
||||
let accessible_topology;
|
||||
if (typeof this.getAccessibleTopology === "function") {
|
||||
accessible_topology = await this.getAccessibleTopology();
|
||||
}
|
||||
|
||||
const res = {
|
||||
volume: {
|
||||
volume_id: name,
|
||||
|
|
@ -521,6 +527,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
capacity_bytes: 0,
|
||||
content_source: volume_content_source,
|
||||
volume_context,
|
||||
accessible_topology,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -570,15 +577,21 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
* @param {*} call
|
||||
*/
|
||||
async GetCapacity(call) {
|
||||
// really capacity is not used at all with nfs in this fashion, so no reason to enable
|
||||
// here even though it is technically feasible.
|
||||
throw new GrpcError(
|
||||
grpc.status.UNIMPLEMENTED,
|
||||
`operation not supported by driver`
|
||||
);
|
||||
|
||||
const driver = this;
|
||||
|
||||
if (
|
||||
!driver.options.service.controller.capabilities.rpc.includes(
|
||||
"GET_CAPACITY"
|
||||
)
|
||||
) {
|
||||
// really capacity is not used at all with nfs in this fashion, so no reason to enable
|
||||
// here even though it is technically feasible.
|
||||
throw new GrpcError(
|
||||
grpc.status.UNIMPLEMENTED,
|
||||
`operation not supported by driver`
|
||||
);
|
||||
}
|
||||
|
||||
if (call.request.volume_capabilities) {
|
||||
const result = this.assertCapabilities(call.request.volume_capabilities);
|
||||
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,93 @@
|
|||
const _ = require("lodash");
|
||||
|
||||
const { ControllerClientCommonDriver } = require("../controller-client-common");
|
||||
|
||||
const NODE_TOPOLOGY_KEY_NAME = "org.democratic-csi.topology/node";
|
||||
|
||||
/**
|
||||
* Crude local-hostpath driver which simply creates directories to be mounted
|
||||
* and uses rsync for cloning/snapshots
|
||||
*/
|
||||
class ControllerLocalHostpathDriver extends ControllerClientCommonDriver {
|
||||
constructor(ctx, options) {
|
||||
const i_caps = _.get(
|
||||
options,
|
||||
"service.identity.capabilities.service",
|
||||
false
|
||||
);
|
||||
|
||||
const c_caps = _.get(options, "service.controller.capabilities", false);
|
||||
super(...arguments);
|
||||
|
||||
if (!i_caps) {
|
||||
this.ctx.logger.debug("setting local-hostpath identity service caps");
|
||||
|
||||
options.service.identity.capabilities.service = [
|
||||
//"UNKNOWN",
|
||||
"CONTROLLER_SERVICE",
|
||||
"VOLUME_ACCESSIBILITY_CONSTRAINTS",
|
||||
];
|
||||
}
|
||||
|
||||
if (!c_caps) {
|
||||
this.ctx.logger.debug("setting local-hostpath controller service caps");
|
||||
|
||||
if (
|
||||
!options.service.controller.capabilities.rpc.includes("GET_CAPACITY")
|
||||
) {
|
||||
options.service.controller.capabilities.rpc.push("GET_CAPACITY");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
getConfigKey() {
|
||||
return "local-hostpath";
|
||||
}
|
||||
|
||||
getVolumeContext(name) {
|
||||
const driver = this;
|
||||
const config_key = driver.getConfigKey();
|
||||
return {
|
||||
node_attach_driver: "hostpath",
|
||||
path: driver.getShareVolumePath(name),
|
||||
};
|
||||
}
|
||||
|
||||
getFsTypes() {
|
||||
return [];
|
||||
}
|
||||
|
||||
/**
|
||||
* List of topologies associated with the *volume*
|
||||
*
|
||||
* @returns array
|
||||
*/
|
||||
async getAccessibleTopology() {
|
||||
const response = await super.NodeGetInfo(...arguments);
|
||||
return [
|
||||
{
|
||||
segments: {
|
||||
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
|
||||
},
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Add node topologies
|
||||
*
|
||||
* @param {*} call
|
||||
* @returns
|
||||
*/
|
||||
async NodeGetInfo(call) {
|
||||
const response = await super.NodeGetInfo(...arguments);
|
||||
response.accessible_topology = {
|
||||
segments: {
|
||||
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
|
||||
},
|
||||
};
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.ControllerLocalHostpathDriver = ControllerLocalHostpathDriver;
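// Editor's sketch (not part of this commit): roughly what getVolumeContext()
// above hands back to the node plugin for a volume named "pvc-1234abcd"; the
// exact path layout under shareBasePath is an assumption.
const example_volume_context = {
  node_attach_driver: "hostpath",
  path: "/var/lib/csi-local-hostpath/pvc-1234abcd", // hypothetical share path
};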
|
||||
|
|
@ -1,12 +1,50 @@
|
|||
const { ControllerZfsSshBaseDriver } = require("../controller-zfs-ssh");
|
||||
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const SshClient = require("../../utils/ssh").SshClient;
|
||||
const sleep = require("../../utils/general").sleep;
|
||||
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
|
||||
const ISCSI_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:iscsi_assets_name";
|
||||
|
||||
class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver {
|
||||
class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
||||
getExecClient() {
|
||||
return new SshClient({
|
||||
logger: this.ctx.logger,
|
||||
connection: this.options.sshConnection,
|
||||
});
|
||||
}
|
||||
|
||||
async getZetabyte() {
|
||||
const execClient = this.getExecClient();
|
||||
const options = {};
|
||||
options.executor = new ZfsSshProcessManager(execClient);
|
||||
options.idempotent = true;
|
||||
|
||||
if (
|
||||
this.options.zfs.hasOwnProperty("cli") &&
|
||||
this.options.zfs.cli &&
|
||||
this.options.zfs.cli.hasOwnProperty("paths")
|
||||
) {
|
||||
options.paths = this.options.zfs.cli.paths;
|
||||
}
|
||||
|
||||
if (
|
||||
this.options.zfs.hasOwnProperty("cli") &&
|
||||
this.options.zfs.cli &&
|
||||
this.options.zfs.cli.hasOwnProperty("sudoEnabled")
|
||||
) {
|
||||
options.sudo = this.getSudoEnabled();
|
||||
}
|
||||
|
||||
if (typeof this.setZetabyteCustomOptions === "function") {
|
||||
await this.setZetabyteCustomOptions(options);
|
||||
}
|
||||
|
||||
return new Zetabyte(options);
|
||||
}
|
||||
|
||||
/**
|
||||
* cannot make this a storage class parameter as storage class/etc context is *not* sent
|
||||
* into various calls such as GetControllerCapabilities etc
|
||||
|
|
@ -30,7 +68,7 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver {
|
|||
*/
|
||||
async createShare(call, datasetName) {
|
||||
const zb = await this.getZetabyte();
|
||||
const sshClient = this.getSshClient();
|
||||
const execClient = this.getExecClient();
|
||||
|
||||
let properties;
|
||||
let response;
|
||||
|
|
@ -194,7 +232,7 @@ create /backstores/block/${iscsiName}
|
|||
|
||||
async deleteShare(call, datasetName) {
|
||||
const zb = await this.getZetabyte();
|
||||
const sshClient = this.getSshClient();
|
||||
const execClient = this.getExecClient();
|
||||
|
||||
let response;
|
||||
let properties;
|
||||
|
|
@ -317,7 +355,7 @@ delete ${iscsiName}
|
|||
}
|
||||
|
||||
async targetCliCommand(data) {
|
||||
const sshClient = this.getSshClient();
|
||||
const execClient = this.getExecClient();
|
||||
const driver = this;
|
||||
|
||||
data = data.trim();
|
||||
|
|
@ -361,8 +399,8 @@ delete ${iscsiName}
|
|||
let options = {
|
||||
pty: true,
|
||||
};
|
||||
let response = await sshClient.exec(
|
||||
sshClient.buildCommand(command, args),
|
||||
let response = await execClient.exec(
|
||||
execClient.buildCommand(command, args),
|
||||
options
|
||||
);
|
||||
if (response.code != 0) {
|
||||
|
|
|
|||
|
|
@@ -0,0 +1,56 @@
|
|||
const cp = require("child_process");
|
||||
|
||||
class LocalCliExecClient {
|
||||
constructor(options = {}) {
|
||||
this.options = options;
|
||||
if (this.options.logger) {
|
||||
this.logger = this.options.logger;
|
||||
} else {
|
||||
this.logger = console;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Build a command line from the name and given args
|
||||
* TODO: escape the arguments
|
||||
*
|
||||
* @param {*} name
|
||||
* @param {*} args
|
||||
*/
|
||||
buildCommand(name, args = []) {
|
||||
args.unshift(name);
|
||||
return args.join(" ");
|
||||
}
|
||||
|
||||
debug() {
|
||||
this.logger.silly(...arguments);
|
||||
}
|
||||
|
||||
async exec(command, options = {}) {
|
||||
return new Promise((resolve, reject) => {
|
||||
this.logger.verbose("LocalCliExecClient command: " + command);
|
||||
let process = cp.exec(command, (err, stdout, stderr) => {
|
||||
if (err) {
|
||||
reject(err);
|
||||
}
|
||||
resolve({
|
||||
stderr,
|
||||
stdout,
|
||||
code: process.exitCode,
|
||||
signal: process.exitSignal,
|
||||
});
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* simple wrapper for logging
|
||||
*/
|
||||
spawn() {
|
||||
const command = this.buildCommand(arguments[0], arguments[1]);
|
||||
this.logger.verbose("LocalCliExecClient command: " + command);
|
||||
return cp.exec(command);
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.LocalCliClient = LocalCliExecClient;
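// Editor's sketch (not part of this commit): rough usage of the class above.
// The logger only needs verbose()/silly() methods; the command and arguments
// are illustrative.
const { LocalCliClient } = require("./exec");

async function listLocalZpools() {
  const client = new LocalCliClient({
    logger: { verbose: console.log, silly: console.log },
  });
  const command = client.buildCommand("zpool", ["list", "-H", "-o", "name"]);
  const response = await client.exec(command);
  if (response.code !== 0) {
    throw new Error(`zpool list failed: ${response.stderr}`);
  }
  return response.stdout.trim().split("\n");
}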
|
||||
|
|
@@ -0,0 +1,228 @@
|
|||
const _ = require("lodash");
|
||||
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const LocalCliExecClient = require("./exec").LocalCliClient;
|
||||
const os = require("os");
|
||||
const { Zetabyte } = require("../../utils/zfs");
|
||||
|
||||
const ZFS_ASSET_NAME_PROPERTY_NAME = "zfs_asset_name";
|
||||
const NODE_TOPOLOGY_KEY_NAME = "org.democratic-csi.topology/node";
|
||||
|
||||
class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
|
||||
constructor(ctx, options) {
|
||||
const i_caps = _.get(
|
||||
options,
|
||||
"service.identity.capabilities.service",
|
||||
false
|
||||
);
|
||||
super(...arguments);
|
||||
|
||||
if (!i_caps) {
|
||||
this.ctx.logger.debug("setting zfs-local identity service caps");
|
||||
|
||||
options.service.identity.capabilities.service = [
|
||||
//"UNKNOWN",
|
||||
"CONTROLLER_SERVICE",
|
||||
"VOLUME_ACCESSIBILITY_CONSTRAINTS",
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
getExecClient() {
|
||||
return new LocalCliExecClient({
|
||||
logger: this.ctx.logger,
|
||||
});
|
||||
}
|
||||
|
||||
async getZetabyte() {
|
||||
const execClient = this.getExecClient();
|
||||
|
||||
const options = {};
|
||||
options.executor = execClient;
|
||||
options.idempotent = true;
|
||||
|
||||
/*
|
||||
if (
|
||||
this.options.zfs.hasOwnProperty("cli") &&
|
||||
this.options.zfs.cli &&
|
||||
this.options.zfs.cli.hasOwnProperty("paths")
|
||||
) {
|
||||
options.paths = this.options.zfs.cli.paths;
|
||||
}
|
||||
*/
|
||||
|
||||
// use env based paths to allow for custom wrapper scripts to chroot to the host
|
||||
options.paths = {
|
||||
zfs: "zfs",
|
||||
zpool: "zpool",
|
||||
sudo: "sudo",
|
||||
chroot: "chroot",
|
||||
};
|
||||
|
||||
if (
|
||||
this.options.zfs.hasOwnProperty("cli") &&
|
||||
this.options.zfs.cli &&
|
||||
this.options.zfs.cli.hasOwnProperty("sudoEnabled")
|
||||
) {
|
||||
options.sudo = this.getSudoEnabled();
|
||||
}
|
||||
|
||||
if (typeof this.setZetabyteCustomOptions === "function") {
|
||||
await this.setZetabyteCustomOptions(options);
|
||||
}
|
||||
|
||||
return new Zetabyte(options);
|
||||
}
|
||||
|
||||
/**
|
||||
* cannot make this a storage class parameter as storage class/etc context is *not* sent
|
||||
* into various calls such as GetControllerCapabilities etc
|
||||
*/
|
||||
getDriverZfsResourceType() {
|
||||
switch (this.options.driver) {
|
||||
case "zfs-local-dataset":
|
||||
return "filesystem";
|
||||
case "zfs-local-zvol":
|
||||
return "volume";
|
||||
default:
|
||||
throw new Error("unknown driver: " + this.ctx.args.driver);
|
||||
}
|
||||
}
|
||||
|
||||
getFSTypes() {
|
||||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
switch (driverZfsResourceType) {
|
||||
case "filesystem":
|
||||
return ["zfs"];
|
||||
case "volume":
|
||||
return ["ext3", "ext4", "ext4dev", "xfs"];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Although it is counter-intuitive to advertise node-local volumes as RWX we
|
||||
* do so here to provide an easy out-of-the-box experience as users will by
|
||||
* default want to provision volumes of RWX. The topology constraints
|
||||
* implicitly will enforce only a single node can use the volume at a given
|
||||
* time.
|
||||
*
|
||||
* @returns Array
|
||||
*/
|
||||
getAccessModes() {
|
||||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
let access_modes = _.get(this.options, "csi.access_modes", null);
|
||||
if (access_modes !== null) {
|
||||
return access_modes;
|
||||
}
|
||||
switch (driverZfsResourceType) {
|
||||
case "filesystem":
|
||||
return [
|
||||
"UNKNOWN",
|
||||
"SINGLE_NODE_WRITER",
|
||||
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_READER_ONLY",
|
||||
"MULTI_NODE_READER_ONLY",
|
||||
"MULTI_NODE_SINGLE_WRITER",
|
||||
"MULTI_NODE_MULTI_WRITER",
|
||||
];
|
||||
case "volume":
|
||||
return [
|
||||
"UNKNOWN",
|
||||
"SINGLE_NODE_WRITER",
|
||||
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_READER_ONLY",
|
||||
"MULTI_NODE_READER_ONLY",
|
||||
"MULTI_NODE_SINGLE_WRITER",
|
||||
"MULTI_NODE_MULTI_WRITER",
|
||||
];
|
||||
}
|
||||
}
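// Editor's sketch (not part of this commit): because getAccessModes() above
// consults csi.access_modes before falling back to the defaults, a deployment
// wanting strictly single-node semantics could presumably pass driver options
// along these lines (key name taken from the lookup above; values illustrative).
const example_options = {
  driver: "zfs-local-dataset",
  csi: {
    access_modes: ["SINGLE_NODE_WRITER", "SINGLE_NODE_READER_ONLY"],
  },
  // ...remaining zfs options as in the ci/examples configs
};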
|
||||
|
||||
/**
|
||||
* csi controller service
|
||||
*
|
||||
* should create any necessary share resources and return volume context
|
||||
*
|
||||
* @param {*} datasetName
|
||||
*/
|
||||
async createShare(call, datasetName) {
|
||||
let volume_context = {};
|
||||
|
||||
switch (this.options.driver) {
|
||||
case "zfs-local-dataset":
|
||||
volume_context = {
|
||||
node_attach_driver: "zfs-local",
|
||||
[ZFS_ASSET_NAME_PROPERTY_NAME]: datasetName,
|
||||
};
|
||||
return volume_context;
|
||||
|
||||
case "zfs-local-zvol":
|
||||
volume_context = {
|
||||
node_attach_driver: "zfs-local",
|
||||
[ZFS_ASSET_NAME_PROPERTY_NAME]: datasetName,
|
||||
};
|
||||
return volume_context;
|
||||
|
||||
default:
|
||||
throw new GrpcError(
|
||||
grpc.status.FAILED_PRECONDITION,
|
||||
`invalid configuration: unknown driver ${this.options.driver}`
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* csi controller service
|
||||
*
|
||||
* @param {*} call
|
||||
* @param {*} datasetName
|
||||
* @returns
|
||||
*/
|
||||
async deleteShare(call, datasetName) {
|
||||
return {};
|
||||
}
|
||||
|
||||
/**
|
||||
* csi controller service
|
||||
*
|
||||
* @param {*} call
|
||||
* @param {*} datasetName
|
||||
*/
|
||||
async expandVolume(call, datasetName) {}
|
||||
|
||||
/**
|
||||
* List of topologies associated with the *volume*
|
||||
*
|
||||
* @returns array
|
||||
*/
|
||||
async getAccessibleTopology() {
|
||||
const response = await super.NodeGetInfo(...arguments);
|
||||
return [
|
||||
{
|
||||
segments: {
|
||||
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
|
||||
},
|
||||
},
|
||||
];
|
||||
}
|
||||
|
||||
/**
|
||||
* Add node topologies
|
||||
*
|
||||
* @param {*} call
|
||||
* @returns
|
||||
*/
|
||||
async NodeGetInfo(call) {
|
||||
const response = await super.NodeGetInfo(...arguments);
|
||||
response.accessible_topology = {
|
||||
segments: {
|
||||
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
|
||||
},
|
||||
};
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.ControllerZfsLocalDriver = ControllerZfsLocalDriver;
|
||||
|
|
@ -1,11 +1,8 @@
|
|||
const { CsiBaseDriver } = require("../index");
|
||||
const SshClient = require("../../utils/ssh").SshClient;
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const sleep = require("../../utils/general").sleep;
|
||||
const getLargestNumber = require("../../utils/general").getLargestNumber;
|
||||
|
||||
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
const uuidv4 = require("uuid").v4;
|
||||
const semver = require("semver");
|
||||
|
|
@ -32,14 +29,20 @@ const VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME =
|
|||
"democratic-csi:volume_context_provisioner_instance_id";
|
||||
|
||||
/**
|
||||
* Base driver to provision zfs assets over ssh.
|
||||
* Base driver to provision zfs assets using zfs cli commands.
|
||||
* Derived drivers only need to implement:
|
||||
* - getExecClient()
|
||||
* - async getZetabyte()
|
||||
* - async setZetabyteCustomOptions(options) // optional
|
||||
* - getDriverZfsResourceType() // return "filesystem" or "volume"
|
||||
* - getFSTypes() // optional
|
||||
* - getAccessModes() // optional
|
||||
* - async getAccessibleTopology() // optional
|
||||
* - async createShare(call, datasetName) // return appropriate volume_context for Node operations
|
||||
* - async deleteShare(call, datasetName) // no return expected
|
||||
* - async expandVolume(call, datasetName) // no return expected, used for restarting services etc if needed
|
||||
*/
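// Editor's sketch (not part of this commit): a hypothetical minimal subclass
// illustrating the contract listed above. It mirrors the ssh-based generic
// driver elsewhere in this diff; names and the nfs volume_context are
// illustrative only.
const SshClient = require("../../utils/ssh").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");

class ExampleZfsDriver extends ControllerZfsBaseDriver {
  getExecClient() {
    return new SshClient({
      logger: this.ctx.logger,
      connection: this.options.sshConnection,
    });
  }

  async getZetabyte() {
    return new Zetabyte({
      executor: new ZfsSshProcessManager(this.getExecClient()),
      idempotent: true,
    });
  }

  getDriverZfsResourceType() {
    return "filesystem"; // or "volume" for zvol-backed drivers
  }

  async createShare(call, datasetName) {
    // volume_context consumed by the node plugin
    return { node_attach_driver: "nfs" };
  }

  async deleteShare(call, datasetName) {}

  async expandVolume(call, datasetName) {}
}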
|
||||
class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
||||
class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||
constructor(ctx, options) {
|
||||
super(...arguments);
|
||||
|
||||
|
|
@ -146,42 +149,6 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
getSshClient() {
|
||||
return new SshClient({
|
||||
logger: this.ctx.logger,
|
||||
connection: this.options.sshConnection,
|
||||
});
|
||||
}
|
||||
|
||||
async getZetabyte() {
|
||||
const sshClient = this.getSshClient();
|
||||
const options = {};
|
||||
options.executor = new ZfsSshProcessManager(sshClient);
|
||||
options.idempotent = true;
|
||||
|
||||
if (
|
||||
this.options.zfs.hasOwnProperty("cli") &&
|
||||
this.options.zfs.cli &&
|
||||
this.options.zfs.cli.hasOwnProperty("paths")
|
||||
) {
|
||||
options.paths = this.options.zfs.cli.paths;
|
||||
}
|
||||
|
||||
if (
|
||||
this.options.zfs.hasOwnProperty("cli") &&
|
||||
this.options.zfs.cli &&
|
||||
this.options.zfs.cli.hasOwnProperty("sudoEnabled")
|
||||
) {
|
||||
options.sudo = this.getSudoEnabled();
|
||||
}
|
||||
|
||||
if (typeof this.setZetabyteCustomOptions === "function") {
|
||||
await this.setZetabyteCustomOptions(options);
|
||||
}
|
||||
|
||||
return new Zetabyte(options);
|
||||
}
|
||||
|
||||
getSudoEnabled() {
|
||||
return this.options.zfs.cli && this.options.zfs.cli.sudoEnabled === true;
|
||||
}
|
||||
|
|
@ -218,6 +185,43 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
await zb.zfs.destroy(datasetName + "@%", options);
|
||||
}
|
||||
|
||||
getFSTypes() {
|
||||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
switch (driverZfsResourceType) {
|
||||
case "filesystem":
|
||||
return ["nfs", "cifs"];
|
||||
case "volume":
|
||||
return ["ext3", "ext4", "ext4dev", "xfs"];
|
||||
}
|
||||
}
|
||||
|
||||
getAccessModes() {
|
||||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
switch (driverZfsResourceType) {
|
||||
case "filesystem":
|
||||
return [
|
||||
"UNKNOWN",
|
||||
"SINGLE_NODE_WRITER",
|
||||
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_READER_ONLY",
|
||||
"MULTI_NODE_READER_ONLY",
|
||||
"MULTI_NODE_SINGLE_WRITER",
|
||||
"MULTI_NODE_MULTI_WRITER",
|
||||
];
|
||||
case "volume":
|
||||
return [
|
||||
"UNKNOWN",
|
||||
"SINGLE_NODE_WRITER",
|
||||
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_READER_ONLY",
|
||||
"MULTI_NODE_READER_ONLY",
|
||||
"MULTI_NODE_SINGLE_WRITER",
|
||||
];
|
||||
}
|
||||
}
|
||||
|
||||
assertCapabilities(capabilities) {
|
||||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
|
||||
|
|
@ -234,24 +238,13 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
|
||||
if (
|
||||
capability.mount.fs_type &&
|
||||
!["nfs", "cifs"].includes(capability.mount.fs_type)
|
||||
!this.getFSTypes().includes(capability.mount.fs_type)
|
||||
) {
|
||||
message = `invalid fs_type ${capability.mount.fs_type}`;
|
||||
return false;
|
||||
}
|
||||
|
||||
if (
|
||||
![
|
||||
"UNKNOWN",
|
||||
"SINGLE_NODE_WRITER",
|
||||
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_READER_ONLY",
|
||||
"MULTI_NODE_READER_ONLY",
|
||||
"MULTI_NODE_SINGLE_WRITER",
|
||||
"MULTI_NODE_MULTI_WRITER",
|
||||
].includes(capability.access_mode.mode)
|
||||
) {
|
||||
if (!this.getAccessModes().includes(capability.access_mode.mode)) {
|
||||
message = `invalid access_mode, ${capability.access_mode.mode}`;
|
||||
return false;
|
||||
}
|
||||
|
|
@ -261,26 +254,14 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
if (capability.access_type == "mount") {
|
||||
if (
|
||||
capability.mount.fs_type &&
|
||||
!["ext3", "ext4", "ext4dev", "xfs"].includes(
|
||||
capability.mount.fs_type
|
||||
)
|
||||
!this.getFSTypes().includes(capability.mount.fs_type)
|
||||
) {
|
||||
message = `invalid fs_type ${capability.mount.fs_type}`;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (
|
||||
![
|
||||
"UNKNOWN",
|
||||
"SINGLE_NODE_WRITER",
|
||||
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||
"SINGLE_NODE_READER_ONLY",
|
||||
"MULTI_NODE_READER_ONLY",
|
||||
"MULTI_NODE_SINGLE_WRITER",
|
||||
].includes(capability.access_mode.mode)
|
||||
) {
|
||||
if (!this.getAccessModes().includes(capability.access_mode.mode)) {
|
||||
message = `invalid access_mode, ${capability.access_mode.mode}`;
|
||||
return false;
|
||||
}
|
||||
|
|
@ -380,6 +361,11 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
let accessible_topology;
|
||||
if (typeof this.getAccessibleTopology === "function") {
|
||||
accessible_topology = await this.getAccessibleTopology();
|
||||
}
|
||||
|
||||
let volume = {
|
||||
// remove parent dataset info
|
||||
volume_id: row["name"].replace(new RegExp("^" + datasetName + "/"), ""),
|
||||
|
|
@ -389,6 +375,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
: row["volsize"],
|
||||
content_source: volume_content_source,
|
||||
volume_context,
|
||||
accessible_topology,
|
||||
};
|
||||
|
||||
return volume;
|
||||
|
|
@ -402,7 +389,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
*/
|
||||
async getMaxZvolNameLength() {
|
||||
const driver = this;
|
||||
const sshClient = driver.getSshClient();
|
||||
const execClient = driver.getExecClient();
|
||||
|
||||
let response;
|
||||
let command;
|
||||
|
|
@ -412,7 +399,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
// get kernel
|
||||
command = "uname -s";
|
||||
driver.ctx.logger.verbose("uname command: %s", command);
|
||||
response = await sshClient.exec(command);
|
||||
response = await execClient.exec(command);
|
||||
if (response.code !== 0) {
|
||||
throw new Error("failed to run uname to determine max zvol name length");
|
||||
} else {
|
||||
|
|
@ -429,7 +416,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
// get kernel_release
|
||||
command = "uname -r";
|
||||
driver.ctx.logger.verbose("uname command: %s", command);
|
||||
response = await sshClient.exec(command);
|
||||
response = await execClient.exec(command);
|
||||
if (response.code !== 0) {
|
||||
throw new Error(
|
||||
"failed to run uname to determine max zvol name length"
|
||||
|
|
@ -481,35 +468,35 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
* limit the actual checks semi sanely
|
||||
* health checks should kick in and restart the pod
|
||||
* this process is 2 checks in 1
|
||||
* - ensure basic ssh connectivity
|
||||
* - ensure basic exec connectivity
|
||||
* - ensure csh is not the operative shell
|
||||
*/
|
||||
if (!driver.currentSSHShell || timerEnabled === false) {
|
||||
const sshClient = this.getSshClient();
|
||||
driver.ctx.logger.debug("performing ssh sanity check..");
|
||||
const response = await sshClient.exec("echo $0");
|
||||
driver.currentSSHShell = response.stdout.split("\n")[0];
|
||||
if (!driver.currentExecShell || timerEnabled === false) {
|
||||
const execClient = this.getExecClient();
|
||||
driver.ctx.logger.debug("performing exec sanity check..");
|
||||
const response = await execClient.exec("echo $0");
|
||||
driver.currentExecShell = response.stdout.split("\n")[0];
|
||||
}
|
||||
|
||||
// update in the background every X interval to prevent incessant checks
|
||||
if (timerEnabled && !driver.currentSSHShellInterval) {
|
||||
if (timerEnabled && !driver.currentExecShellInterval) {
|
||||
const intervalTime = 60000;
|
||||
driver.currentSSHShellInterval = setInterval(async () => {
|
||||
driver.currentExecShellInterval = setInterval(async () => {
|
||||
try {
|
||||
driver.ctx.logger.debug("performing ssh sanity check..");
|
||||
const sshClient = this.getSshClient();
|
||||
const response = await sshClient.exec("echo $0");
|
||||
driver.currentSSHShell = response.stdout.split("\n")[0];
|
||||
driver.ctx.logger.debug("performing exec sanity check..");
|
||||
const execClient = this.getExecClient();
|
||||
const response = await execClient.exec("echo $0");
|
||||
driver.currentExecShell = response.stdout.split("\n")[0];
|
||||
} catch (e) {
|
||||
delete driver.currentSSHShell;
|
||||
delete driver.currentExecShell;
|
||||
}
|
||||
}, intervalTime);
|
||||
}
|
||||
|
||||
if (driver.currentSSHShell.includes("csh")) {
|
||||
if (driver.currentExecShell.includes("csh")) {
|
||||
throw new GrpcError(
|
||||
grpc.status.FAILED_PRECONDITION,
|
||||
`csh is an unsupported shell, please update the default shell of your ssh user`
|
||||
`csh is an unsupported shell, please update the default shell of your exec user`
|
||||
);
|
||||
}
|
||||
|
||||
|
|
@ -533,7 +520,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
async CreateVolume(call) {
|
||||
const driver = this;
|
||||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
const sshClient = this.getSshClient();
|
||||
const execClient = this.getExecClient();
|
||||
const zb = await this.getZetabyte();
|
||||
|
||||
let datasetParentName = this.getVolumeParentDatasetName();
|
||||
|
|
@ -1028,7 +1015,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
|
||||
// set mode
|
||||
if (this.options.zfs.datasetPermissionsMode) {
|
||||
command = sshClient.buildCommand("chmod", [
|
||||
command = execClient.buildCommand("chmod", [
|
||||
this.options.zfs.datasetPermissionsMode,
|
||||
properties.mountpoint.value,
|
||||
]);
|
||||
|
|
@ -1037,7 +1024,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
driver.ctx.logger.verbose("set permission command: %s", command);
|
||||
response = await sshClient.exec(command);
|
||||
response = await execClient.exec(command);
|
||||
if (response.code != 0) {
|
||||
throw new GrpcError(
|
||||
grpc.status.UNKNOWN,
|
||||
|
|
@ -1053,7 +1040,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
this.options.zfs.datasetPermissionsUser ||
|
||||
this.options.zfs.datasetPermissionsGroup
|
||||
) {
|
||||
command = sshClient.buildCommand("chown", [
|
||||
command = execClient.buildCommand("chown", [
|
||||
(this.options.zfs.datasetPermissionsUser
|
||||
? this.options.zfs.datasetPermissionsUser
|
||||
: "") +
|
||||
|
|
@ -1068,7 +1055,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
driver.ctx.logger.verbose("set ownership command: %s", command);
|
||||
response = await sshClient.exec(command);
|
||||
response = await execClient.exec(command);
|
||||
if (response.code != 0) {
|
||||
throw new GrpcError(
|
||||
grpc.status.UNKNOWN,
|
||||
|
|
@ -1082,7 +1069,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
// probably could see if ^-.*\s and split and then shell escape
|
||||
if (this.options.zfs.datasetPermissionsAcls) {
|
||||
for (const acl of this.options.zfs.datasetPermissionsAcls) {
|
||||
command = sshClient.buildCommand("setfacl", [
|
||||
command = execClient.buildCommand("setfacl", [
|
||||
acl,
|
||||
properties.mountpoint.value,
|
||||
]);
|
||||
|
|
@ -1091,7 +1078,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
driver.ctx.logger.verbose("set acl command: %s", command);
|
||||
response = await sshClient.exec(command);
|
||||
response = await execClient.exec(command);
|
||||
if (response.code != 0) {
|
||||
throw new GrpcError(
|
||||
grpc.status.UNKNOWN,
|
||||
|
|
@ -1137,6 +1124,11 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
// this should give us a relatively sane way to clean up artifacts over time
|
||||
await zb.zfs.set(datasetName, { [SUCCESS_PROPERTY_NAME]: "true" });
|
||||
|
||||
let accessible_topology;
|
||||
if (typeof this.getAccessibleTopology === "function") {
|
||||
accessible_topology = await this.getAccessibleTopology();
|
||||
}
|
||||
|
||||
const res = {
|
||||
volume: {
|
||||
volume_id: name,
|
||||
|
|
@ -1148,6 +1140,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
: 0,
|
||||
content_source: volume_content_source,
|
||||
volume_context,
|
||||
accessible_topology,
|
||||
},
|
||||
};
|
||||
|
||||
|
|
@ -1562,7 +1555,6 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`);
|
||||
if (entries) {
|
||||
entries = JSON.parse(JSON.stringify(entries));
|
||||
entries_length = entries.length;
|
||||
entries = entries.slice(start_position, end_position);
|
||||
if (max_entries > 0 && end_position > entries_length) {
|
||||
|
|
@ -1659,10 +1651,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
|
||||
if (max_entries && entries.length > max_entries) {
|
||||
uuid = uuidv4();
|
||||
this.ctx.cache.set(
|
||||
`ListVolumes:result:${uuid}`,
|
||||
JSON.parse(JSON.stringify(entries))
|
||||
);
|
||||
this.ctx.cache.set(`ListVolumes:result:${uuid}`, entries);
|
||||
next_token = `${uuid}:${max_entries}`;
|
||||
entries = entries.slice(0, max_entries);
|
||||
}
|
||||
|
|
@ -2366,4 +2355,4 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
module.exports.ControllerZfsSshBaseDriver = ControllerZfsSshBaseDriver;
|
||||
module.exports.ControllerZfsBaseDriver = ControllerZfsBaseDriver;
|
||||
|
|
@ -1,6 +1,10 @@
|
|||
const { FreeNASSshDriver } = require("./freenas/ssh");
|
||||
const { FreeNASApiDriver } = require("./freenas/api");
|
||||
const {
|
||||
ControllerLocalHostpathDriver,
|
||||
} = require("./controller-local-hostpath");
|
||||
const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");
|
||||
const { ControllerZfsLocalDriver } = require("./controller-zfs-local");
|
||||
const {
|
||||
ZfsLocalEphemeralInlineDriver,
|
||||
} = require("./zfs-local-ephemeral-inline");
|
||||
|
|
@ -31,12 +35,17 @@ function factory(ctx, options) {
|
|||
case "zfs-generic-nfs":
|
||||
case "zfs-generic-iscsi":
|
||||
return new ControllerZfsGenericDriver(ctx, options);
|
||||
case "zfs-local-dataset":
|
||||
case "zfs-local-zvol":
|
||||
return new ControllerZfsLocalDriver(ctx, options);
|
||||
case "zfs-local-ephemeral-inline":
|
||||
return new ZfsLocalEphemeralInlineDriver(ctx, options);
|
||||
case "smb-client":
|
||||
return new ControllerSmbClientDriver(ctx, options);
|
||||
case "nfs-client":
|
||||
return new ControllerNfsClientDriver(ctx, options);
|
||||
case "local-hostpath":
|
||||
return new ControllerLocalHostpathDriver(ctx, options);
|
||||
case "lustre-client":
|
||||
return new ControllerLustreClientDriver(ctx, options);
|
||||
case "node-manual":
|
||||
|
|
|
|||
|
|
@ -3123,7 +3123,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
}
|
||||
entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`);
|
||||
if (entries) {
|
||||
entries = JSON.parse(JSON.stringify(entries));
|
||||
entries_length = entries.length;
|
||||
entries = entries.slice(start_position, end_position);
|
||||
if (max_entries > 0 && end_position > entries_length) {
|
||||
|
|
@ -3219,10 +3218,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
|
||||
if (max_entries && entries.length > max_entries) {
|
||||
uuid = uuidv4();
|
||||
this.ctx.cache.set(
|
||||
`ListVolumes:result:${uuid}`,
|
||||
JSON.parse(JSON.stringify(entries))
|
||||
);
|
||||
this.ctx.cache.set(`ListVolumes:result:${uuid}`, entries);
|
||||
next_token = `${uuid}:${max_entries}`;
|
||||
entries = entries.slice(0, max_entries);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,8 @@
const { ControllerZfsSshBaseDriver } = require("../controller-zfs-ssh");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const SshClient = require("../../utils/ssh").SshClient;
const HttpClient = require("./http").Client;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");

const Handlebars = require("handlebars");
@ -18,7 +20,43 @@ const FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME =
// used for in-memory cache of the version info
const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
class FreeNASSshDriver extends ControllerZfsSshBaseDriver {
class FreeNASSshDriver extends ControllerZfsBaseDriver {
getExecClient() {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection,
});
}

async getZetabyte() {
const sshClient = this.getExecClient();
const options = {};
options.executor = new ZfsSshProcessManager(sshClient);
options.idempotent = true;

if (
this.options.zfs.hasOwnProperty("cli") &&
this.options.zfs.cli &&
this.options.zfs.cli.hasOwnProperty("paths")
) {
options.paths = this.options.zfs.cli.paths;
}

if (
this.options.zfs.hasOwnProperty("cli") &&
this.options.zfs.cli &&
this.options.zfs.cli.hasOwnProperty("sudoEnabled")
) {
options.sudo = this.getSudoEnabled();
}

if (typeof this.setZetabyteCustomOptions === "function") {
await this.setZetabyteCustomOptions(options);
}

return new Zetabyte(options);
}

/**
* cannot make this a storage class parameter as storage class/etc context is *not* sent
* into various calls such as GetControllerCapabilities etc
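
The hunk above moves FreeNASSshDriver onto ControllerZfsBaseDriver and hides the transport behind the getExecClient()/getZetabyte() hooks. A minimal sketch of what another subclass could plug into those same hooks, assuming the base class only depends on this surface (LocalExecClient below is hypothetical):

// Illustrative subclass only; LocalExecClient is a hypothetical executor that
// would expose the same buildCommand()/exec() surface as SshClient.
class ExampleZfsDriver extends ControllerZfsBaseDriver {
  getExecClient() {
    return new LocalExecClient({ logger: this.ctx.logger });
  }

  async getZetabyte() {
    return new Zetabyte({
      idempotent: true,
      executor: { spawn: require("child_process").spawn },
    });
  }
}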
@ -169,7 +207,7 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver {
async createShare(call, datasetName) {
const driver = this;
const driverShareType = this.getDriverShareType();
const sshClient = this.getSshClient();
const execClient = this.getExecClient();
const httpClient = await this.getHttpClient();
const apiVersion = httpClient.getApiVersion();
const zb = await this.getZetabyte();
@ -1626,7 +1664,7 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver {

async expandVolume(call, datasetName) {
const driverShareType = this.getDriverShareType();
const sshClient = this.getSshClient();
const execClient = this.getExecClient();
const zb = await this.getZetabyte();

switch (driverShareType) {
@ -1645,7 +1683,7 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver {
properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value;

/**
* command = sshClient.buildCommand("systemctl", ["reload", "scst"]);
* command = execClient.buildCommand("systemctl", ["reload", "scst"]);
* does not help ^
*
* echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size
@ -1657,13 +1695,13 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver {
* midclt resync_lun_size_for_zvol tank/foo/bar
* works on SCALE only ^
*/
command = sshClient.buildCommand("sh", [
command = execClient.buildCommand("sh", [
"-c",
`echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size`,
]);
reload = true;
} else {
command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]);
command = execClient.buildCommand("/etc/rc.d/ctld", ["reload"]);
reload = true;
}
@ -1677,7 +1715,7 @@ class FreeNASSshDriver extends ControllerZfsSshBaseDriver {
command
);

let response = await sshClient.exec(command);
let response = await execClient.exec(command);
if (response.code != 0) {
throw new GrpcError(
grpc.status.UNKNOWN,
@ -1,12 +1,15 @@
const _ = require("lodash");
const cp = require("child_process");
const os = require("os");
const fs = require("fs");
const { GrpcError, grpc } = require("../utils/grpc");
const { Mount } = require("../utils/mount");
const { OneClient } = require("../utils/oneclient");
const { Filesystem } = require("../utils/filesystem");
const { ISCSI } = require("../utils/iscsi");
const semver = require("semver");
const sleep = require("../utils/general").sleep;
const { Zetabyte } = require("../utils/zfs");

/**
* common code shared between all drivers
@ -325,11 +328,19 @@ class CsiBaseDriver {
if (normalizedSecrets.mount_flags) {
mount_flags.push(normalizedSecrets.mount_flags);
}
mount_flags.push("defaults");

// https://github.com/karelzak/util-linux/issues/1429
//mount_flags.push("x-democratic-csi.managed");
//mount_flags.push("x-democratic-csi.staged");
switch (node_attach_driver) {
case "oneclient":
// move along
break;
default:
mount_flags.push("defaults");

// https://github.com/karelzak/util-linux/issues/1429
//mount_flags.push("x-democratic-csi.managed");
//mount_flags.push("x-democratic-csi.staged");
break;
}

if (
semver.satisfies(driver.ctx.csiVersion, ">=1.5.0") &&
@ -565,6 +576,108 @@ class CsiBaseDriver {
}
}
break;
case "hostpath":
result = await mount.pathIsMounted(staging_target_path);
// if not mounted, mount
if (!result) {
await mount.bindMount(volume_context.path, staging_target_path);
return {};
} else {
return {};
}

break;
case "oneclient":
let oneclient = new OneClient();
device = "oneclient";
result = await mount.deviceIsMountedAtPath(device, staging_target_path);
if (result) {
return {};
}

if (volume_context.space_names) {
volume_context.space_names.split(",").forEach((space) => {
mount_flags.push("--space", space);
});
}

if (volume_context.space_ids) {
volume_context.space_ids.split(",").forEach((space) => {
mount_flags.push("--space-id", space);
});
}

if (normalizedSecrets.token) {
mount_flags.push("-t", normalizedSecrets.token);
} else {
if (volume_context.token) {
mount_flags.push("-t", volume_context.token);
}
}

result = await oneclient.mount(
staging_target_path,
["-H", volume_context.server].concat(mount_flags)
);

if (result) {
return {};
}

throw new GrpcError(
grpc.status.UNKNOWN,
`failed to mount oneclient: ${volume_context.server}`
);

break;
case "zfs-local":
// TODO: make this a generic zb instance (to ensure works with node-manual driver)
const zb = new Zetabyte({
idempotent: true,
paths: {
zfs: "zfs",
zpool: "zpool",
sudo: "sudo",
chroot: "chroot",
},
//logger: driver.ctx.logger,
executor: {
spawn: function () {
const command = `${arguments[0]} ${arguments[1].join(" ")}`;
return cp.exec(command);
},
},
log_commands: true,
});
result = await zb.zfs.get(`${volume_context.zfs_asset_name}`, [
"type",
"mountpoint",
]);
result = result[`${volume_context.zfs_asset_name}`];
switch (result.type.value) {
case "filesystem":
if (result.mountpoint.value != "legacy") {
// zfs set mountpoint=legacy <dataset>
// zfs inherit mountpoint <dataset>
await zb.zfs.set(`${volume_context.zfs_asset_name}`, {
mountpoint: "legacy",
});
}
device = `${volume_context.zfs_asset_name}`;
if (!fs_type) {
fs_type = "zfs";
}
break;
case "volume":
device = `/dev/zvol/${volume_context.zfs_asset_name}`;
break;
default:
throw new GrpcError(
grpc.status.UNKNOWN,
`unknown zfs asset type: ${result.type.value}`
);
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
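
In the new zfs-local staging path above, a filesystem dataset is forced to mountpoint=legacy so the CSI-managed mount owns the mountpoint, while a zvol is surfaced under /dev/zvol and handled like any other block device. An illustrative shape for the volume_context it consumes (field values are examples, and the exact key set passed by the driver is assumed):

// Illustrative volume_context for the zfs-local path; values are placeholders.
const volume_context = {
  node_attach_driver: "zfs-local", // key name assumed
  zfs_asset_name: "tank/k8s/pvc-1234", // dataset (filesystem) or zvol consumed above
};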
@ -574,53 +687,59 @@ class CsiBaseDriver {

switch (access_type) {
case "mount":
let is_block = false;
switch (node_attach_driver) {
// block specific logic
case "iscsi":
if (!fs_type) {
fs_type = "ext4";
}
is_block = true;
break;
case "zfs-local":
is_block = device.startsWith("/dev/zvol/");
break;
}

if (await filesystem.isBlockDevice(device)) {
// format
result = await filesystem.deviceIsFormatted(device);
if (!result) {
let formatOptions = _.get(
driver.options.node.format,
[fs_type, "customOptions"],
[]
);
if (!Array.isArray(formatOptions)) {
formatOptions = [];
}
await filesystem.formatDevice(device, fs_type, formatOptions);
}
if (is_block) {
// block specific logic
if (!fs_type) {
fs_type = "ext4";
}

let fs_info = await filesystem.getDeviceFilesystemInfo(device);
fs_type = fs_info.type;

// fsck
result = await mount.deviceIsMountedAtPath(
device,
staging_target_path
if (await filesystem.isBlockDevice(device)) {
// format
result = await filesystem.deviceIsFormatted(device);
if (!result) {
let formatOptions = _.get(
driver.options.node.format,
[fs_type, "customOptions"],
[]
);
if (!result) {
// https://github.com/democratic-csi/democratic-csi/issues/52#issuecomment-768463401
let checkFilesystem =
driver.options.node.mount.checkFilesystem[fs_type] || {};
if (checkFilesystem.enabled) {
await filesystem.checkFilesystem(
device,
fs_type,
checkFilesystem.customOptions || [],
checkFilesystem.customFilesystemOptions || []
);
}
if (!Array.isArray(formatOptions)) {
formatOptions = [];
}
await filesystem.formatDevice(device, fs_type, formatOptions);
}

let fs_info = await filesystem.getDeviceFilesystemInfo(device);
fs_type = fs_info.type;

// fsck
result = await mount.deviceIsMountedAtPath(
device,
staging_target_path
);
if (!result) {
// https://github.com/democratic-csi/democratic-csi/issues/52#issuecomment-768463401
let checkFilesystem =
driver.options.node.mount.checkFilesystem[fs_type] || {};
if (checkFilesystem.enabled) {
await filesystem.checkFilesystem(
device,
fs_type,
checkFilesystem.customOptions || [],
checkFilesystem.customFilesystemOptions || []
);
}
}
break;
default:
break;
}
}

result = await mount.deviceIsMountedAtPath(device, staging_target_path);
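
The staging logic above pulls per-filesystem behavior out of the driver config: mkfs options via driver.options.node.format[fs_type].customOptions and the pre-mount fsck via driver.options.node.mount.checkFilesystem[fs_type]. A hedged sketch of what such a config object could look like (keys mirror the lookups above; values are examples only):

// Example shape only; values are illustrative, keys mirror the lookups above.
const nodeOptions = {
  format: {
    ext4: { customOptions: ["-E", "nodiscard"] },
  },
  mount: {
    checkFilesystem: {
      ext4: {
        enabled: true,
        customOptions: ["-f"],
        customFilesystemOptions: [],
      },
    },
  },
};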
@ -1011,7 +1130,10 @@ class CsiBaseDriver {
case "nfs":
case "smb":
case "lustre":
case "oneclient":
case "hostpath":
case "iscsi":
case "zfs-local":
// ensure appropriate directories/files
switch (access_type) {
case "mount":
@ -1348,12 +1470,17 @@ class CsiBaseDriver {
rescan_devices.push(device);

for (let sdevice of rescan_devices) {
// TODO: technically rescan is only relevant/available for remote drives
// such as iscsi etc, should probably limit this call as appropriate
// for now crudely checking the scenario inside the method itself
await filesystem.rescanDevice(sdevice);
}

// let things settle
// it appears the dm devices can take a second to figure things out
await sleep(2000);
if (is_device_mapper || true) {
await sleep(2000);
}

if (is_formatted && access_type == "mount") {
fs_info = await filesystem.getDeviceFilesystemInfo(device);
@ -106,6 +106,7 @@ class NodeManualDriver extends CsiBaseDriver {
let message = null;
let driverResourceType;
let fs_types = [];
let access_modes = [];
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
switch (node_attach_driver) {
case "nfs":
@ -120,10 +121,27 @@ class NodeManualDriver extends CsiBaseDriver {
driverResourceType = "filesystem";
fs_types = ["lustre"];
break;
case "oneclient":
driverResourceType = "filesystem";
fs_types = ["oneclient", "fuse.oneclient"];
break;
case "hostpath":
driverResourceType = "filesystem";
break;
case "iscsi":
driverResourceType = "volume";
fs_types = ["ext3", "ext4", "ext4dev", "xfs"];
break;
case "zfs-local":
driverResourceType = "volume";
fs_types = ["ext3", "ext4", "ext4dev", "xfs", "zfs"];
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
];
default:
return {
valid: false,
@ -134,6 +152,18 @@ class NodeManualDriver extends CsiBaseDriver {
const valid = capabilities.every((capability) => {
switch (driverResourceType) {
case "filesystem":
if (access_modes.length == 0) {
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
}
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
@ -147,8 +177,15 @@ class NodeManualDriver extends CsiBaseDriver {
return false;
}

if (
![
if (!access_modes.includes(capability.access_mode.mode)) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}

return true;
case "volume":
if (access_modes.length == 0) {
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
@ -156,15 +193,8 @@ class NodeManualDriver extends CsiBaseDriver {
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
];
}

return true;
case "volume":
if (capability.access_type == "mount") {
if (
capability.mount.fs_type &&
@ -175,17 +205,7 @@ class NodeManualDriver extends CsiBaseDriver {
}
}

if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
].includes(capability.access_mode.mode)
) {
if (!access_modes.includes(capability.access_mode.mode)) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
@ -431,8 +431,12 @@ class Filesystem {

// echo 1 > /sys/block/sdb/device/rescan
const sys_file = `/sys/block/${device_name}/device/rescan`;
console.log(`executing filesystem command: echo 1 > ${sys_file}`);
fs.writeFileSync(sys_file, "1");

// node-local devices cannot be rescanned, so ignore
if (await filesystem.pathExists(sys_file)) {
console.log(`executing filesystem command: echo 1 > ${sys_file}`);
fs.writeFileSync(sys_file, "1");
}
}
}
@ -10,7 +10,7 @@ FINDMNT_COMMON_OPTIONS = [
"--nofsroot", // prevents unwanted behavior with cifs volumes
];

DEFAUT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;

class Mount {
constructor(options = {}) {
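
Besides correcting the constant name, the line above keeps the existing override: the default exec timeout stays at 30000 ms unless MOUNT_DEFAULT_TIMEOUT is set in the environment before the module loads. For example:

// Illustrative override; the value is an example and is read once at module load.
process.env.MOUNT_DEFAULT_TIMEOUT = "60000"; // 60s instead of the 30s default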
@ -34,6 +34,10 @@ class Mount {
options.paths.sudo = "/usr/bin/sudo";
}

if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}

if (!options.timeout) {
options.timeout = 10 * 60 * 1000;
}
@ -379,7 +383,7 @@ class Mount {

exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAUT_TIMEOUT;
options.timeout = DEFAULT_TIMEOUT;
}

const mount = this;
@ -0,0 +1,147 @@
const cp = require("child_process");

DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;

/**
* - https://github.com/onedata/oneclient
*/
class OneClient {
constructor(options = {}) {
const oneclient = this;
oneclient.options = options;

options.paths = options.paths || {};
if (!options.paths.oneclient) {
options.paths.oneclient = "oneclient";
}

if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}

if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}

if (!options.timeout) {
options.timeout = 10 * 60 * 1000;
}

if (!options.executor) {
options.executor = {
spawn: cp.spawn,
};
}
}

/**
* oneclient [options] <directory>
*
* @param {*} target
* @param {*} options
*/
async mount(target, options = []) {
const oneclient = this;
let args = [];
args = args.concat(options);
args = args.concat([target]);

let result;
try {
result = await oneclient.exec(oneclient.options.paths.oneclient, args);
return result;
} catch (err) {
throw err;
}
}

/**
* oneclient -u <directory>
*
* @param {*} target
*/
async umount(target) {
const oneclient = this;
let args = ["-u"];
args.push(target);

try {
await oneclient.exec(oneclient.options.paths.oneclient, args);
} catch (err) {
throw err;
}
return true;
}

exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}

const oneclient = this;
args = args || [];

let timeout;
let stdout = "";
let stderr = "";

if (oneclient.options.sudo) {
args.unshift(command);
command = oneclient.options.paths.sudo;
}

// replace -t <token> with -t redacted
const regex = /(?<=\-t) (?:[^\s]+)/gi;
const cleansedLog = `${command} ${args.join(" ")}`.replace(
regex,
" redacted"
);

console.log("executing oneclient command: %s", cleansedLog);
const child = oneclient.options.executor.spawn(command, args, options);

/**
* timeout option natively supported since v16
* TODO: properly handle this based on nodejs version
*/
let didTimeout = false;
if (options && options.timeout) {
timeout = setTimeout(() => {
didTimeout = true;
child.kill(options.killSignal || "SIGTERM");
}, options.timeout);
}

return new Promise((resolve, reject) => {
child.stdout.on("data", function (data) {
stdout = stdout + data;
});

child.stderr.on("data", function (data) {
stderr = stderr + data;
});

child.on("close", function (code) {
const result = { code, stdout, stderr, timeout: false };

if (timeout) {
clearTimeout(timeout);
}

// timeout scenario
if (code === null) {
result.timeout = true;
reject(result);
}

if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}

module.exports.OneClient = OneClient;
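
The new OneClient helper above wraps the oneclient CLI with the same spawn/timeout plumbing used by the Mount class and redacts `-t <token>` values before logging the command. A short usage sketch; the require path, host, space, token, and mountpoint are placeholders:

// Usage sketch; all values below are placeholders.
const { OneClient } = require("./src/utils/oneclient"); // path assumed

async function example() {
  const oneclient = new OneClient();
  await oneclient.mount("/mnt/staging", [
    "-H", "onezone.example.com",
    "--space", "demo-space",
    "-t", "example-token", // redacted in the command log by exec()
  ]);
  // ...later, during unstage...
  await oneclient.umount("/mnt/staging");
}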
@ -38,6 +38,14 @@ class Zetabyte {
};
}

if (!options.logger) {
options.logger = console;
}

if (!options.hasOwnProperty("log_commands")) {
options.log_commands = false;
}

zb.DEFAULT_ZPOOL_LIST_PROPERTIES = [
"name",
"size",
@ -1548,6 +1556,15 @@ class Zetabyte {
command = zb.options.paths.sudo;
}

if (zb.options.log_commands) {
if (typeof zb.options.logger.verbose != "function") {
zb.options.logger.verbose = function() {
console.debug(...arguments);
}
}
zb.options.logger.verbose(`executing zfs command: ${command} ${args.join(" ")}`);
}

const child = zb.options.executor.spawn(command, args, options);

let didTimeout = false;
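
With the change above, Zetabyte defaults to `console` as its logger, leaves command logging off unless log_commands is set, and falls back to console.debug when the supplied logger has no verbose method. A hedged construction example (the executor wiring and logger object are illustrative):

// Illustrative construction; "appLogger" is a placeholder for any logger object.
const zb = new Zetabyte({
  executor: { spawn: require("child_process").spawn },
  logger: appLogger, // anything; a missing .verbose() falls back to console.debug
  log_commands: true, // off by default
});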