Merge pull request #88 from democratic-csi/next

add smb-client driver, share code with nfs-client driver
This commit is contained in:
Travis Glenn Hansen 2021-09-02 23:10:40 -06:00 committed by GitHub
commit 72b29c860b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 11388 additions and 1037 deletions

View File

@ -1,11 +1,16 @@
#!/bin/bash
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
echo "$GHCR_PASSWORD" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin
export DOCKER_ORG="democraticcsi"
export DOCKER_PROJECT="democratic-csi"
export DOCKER_REPO="${DOCKER_ORG}/${DOCKER_PROJECT}"
export GHCR_ORG="democratic-csi"
export GHCR_PROJECT="democratic-csi"
export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}"
if [[ $GITHUB_REF == refs/tags/* ]]; then
export GIT_TAG=${GITHUB_REF#refs/tags/}
else
@ -13,12 +18,12 @@ else
fi
if [[ -n "${GIT_TAG}" ]]; then
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_TAG} .
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_TAG} -t ${GHCR_REPO}:${GIT_TAG} .
elif [[ -n "${GIT_BRANCH}" ]]; then
if [[ "${GIT_BRANCH}" == "master" ]]; then
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:latest .
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:latest -t ${GHCR_REPO}:latest .
else
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_BRANCH} .
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_BRANCH} -t ${GHCR_REPO}:${GIT_BRANCH} .
fi
else
:

View File

@ -34,5 +34,7 @@ jobs:
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
DOCKER_CLI_EXPERIMENTAL: enabled
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7

View File

@ -1,3 +1,24 @@
# v1.3.0
Released 2021-09-02
- use `ghcr.io` for images as well as docker hub (#90)
- introduce api-only drivers for freenas (`freenas-api-*`)
- `smb-client` driver which creates folders on an smb share
- `lustre-client` driver which creates folders on a lustre share
attaching to various volumes which have been pre-provisioned by the operator
- `synology-iscsi` driver
- various documentation improvements
- support for csi versions `1.4.0` and `1.5.0`
- reintroduce advanced options that allow control over `fsck` (#85)
- advanced options for customizing `mkfs` commands
- better handling of stale nfs connections
- do not log potentially sensitive data in mount commands
- timeouts on various commands to improve driver operations under adverse
conditions
- various fixes and improvements throughout
- dependency bumps
# v1.2.0
Released 2021-05-12

View File

@ -12,12 +12,12 @@ RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/*
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8
ENV NODE_VERSION=v12.20.0
ENV NODE_VERSION=v12.22.6
#ENV NODE_VERSION=v14.15.1
ENV NODE_ENV=production
# install build deps
RUN apt-get update && apt-get install -y python make gcc g++
RUN apt-get update && apt-get install -y python make cmake gcc g++
# install node
RUN apt-get update && apt-get install -y wget xz-utils
@ -43,6 +43,8 @@ RUN rm -rf docker
######################
FROM debian:10-slim
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETPLATFORM

View File

@ -18,13 +18,21 @@ have access to resizing, snapshots, clones, etc functionality.
- `freenas-nfs` (manages zfs datasets to share over nfs)
- `freenas-iscsi` (manages zfs zvols to share over iscsi)
- `freenas-smb` (manages zfs datasets to share over smb)
- `freenas-api-nfs` experimental use with SCALE only (manages zfs datasets to share over nfs)
- `freenas-api-iscsi` experimental use with SCALE only (manages zfs zvols to share over iscsi)
- `freenas-api-smb` experimental use with SCALE only (manages zfs datasets to share over smb)
- `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu)
- `zfs-local-ephemeral-inline` (provisions node-local zfs datasets)
- `synology-iscsi` experimental (manages volumes to share over iscsi)
- `lustre-client` (crudely provisions storage using a shared lustre
share/directory for all volumes)
- `nfs-client` (crudely provisions storage using a shared nfs share/directory
for all volumes)
- `node-manual` (allows connecting to manually created smb, nfs, and iscsi
volumes, see sample PVs in the `examples` directory)
- `smb-client` (crudely provisions storage using a shared smb share/directory
for all volumes)
- `node-manual` (allows connecting to manually created smb, nfs, lustre, and
iscsi volumes, see sample PVs in the `examples` directory)
- framework for developing `csi` drivers
If you have any interest in providing a `csi` driver, simply open an issue to
@ -40,11 +48,13 @@ Predominantly 3 things are needed:
- deploy the driver into the cluster (`helm` chart provided with sample
`values.yaml`)
## Guides
## Community Guides
- https://jonathangazeley.com/2021/01/05/using-truenas-to-provide-persistent-storage-for-kubernetes/
- https://gist.github.com/admun/4372899f20421a947b7544e5fc9f9117 (migrating
from `nfs-client-provisioner` to `democratic-csi`)
- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc
(migrating between storage classes using `velero`)
## Node Prep
@ -135,11 +145,16 @@ necessary.
Server preparation depends slightly on which `driver` you are using.
### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb)
### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb, freenas-api-nfs, freenas-api-iscsi, freenas-api-smb)
The recommended version of FreeNAS is 12.0-U2+, however the driver should work
with much older versions as well.
The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be
used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh`
connections and do all operations entirely with the TrueNAS api. With that in
mind, any ssh/shell/etc requirements below can be safely ignored.
Ensure the following services are configured and running:
- ssh (if you use a password for authentication make sure it is allowed)
@ -172,6 +187,7 @@ non-`root` user when connecting to the FreeNAS server:
```
csi ALL=(ALL) NOPASSWD:ALL
```
(note this can get reset by FreeNAS if you alter the user via the
GUI later)
@ -203,6 +219,10 @@ Ensure ssh and zfs is installed on the nfs/iscsi server and that you have instal
- `sudo yum install targetcli -y`
- `sudo apt-get -y install targetcli-fb`
### Synology (synology-iscsi)
Ensure iscsi manager has been installed and is generally setup/configured.
## Helm Installation
```
@ -247,6 +267,9 @@ microk8s helm upgrade \
zfs-nfs democratic-csi/democratic-csi
```
- microk8s - `/var/snap/microk8s/common/var/lib/kubelet`
- pivotal - `/var/vcap/data/kubelet`
### openshift
`democratic-csi` generally works fine with openshift. Some special parameters

View File

@ -32,7 +32,16 @@ const args = require("yargs")
})
.option("csi-version", {
describe: "versin of the csi spec to load",
choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"],
choices: [
"0.2.0",
"0.3.0",
"1.0.0",
"1.1.0",
"1.2.0",
"1.3.0",
"1.4.0",
"1.5.0",
],
})
.demandOption(["csi-version"], "csi-version is required")
.option("csi-name", {

View File

@ -10,7 +10,16 @@ const args = require("yargs")
.usage("$0 [options]")
.option("csi-version", {
describe: "versin of the csi spec to load",
choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"],
choices: [
"0.2.0",
"0.3.0",
"1.0.0",
"1.1.0",
"1.2.0",
"1.3.0",
"1.4.0",
"1.5.0",
],
})
.demandOption(["csi-version"], "csi-version is required")
.option("csi-address", {
@ -49,7 +58,7 @@ const clientIdentity = new csi.Identity(
/**
* Probe the identity service and check for ready state
*
*
* https://github.com/kubernetes-csi/livenessprobe/blob/master/cmd/livenessprobe/main.go
* https://github.com/kubernetes-csi/csi-lib-utils/blob/master/rpc/common.go
*/

1578
csi_proto/csi-v1.4.0.proto Normal file

File diff suppressed because it is too large Load Diff

1635
csi_proto/csi-v1.5.0.proto Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,85 @@
driver: freenas-api-iscsi
instance_id:
httpConnection:
protocol: http
host: server address
port: 80
# use only 1 of apiKey or username/password
# if both are present, apiKey is preferred
# apiKey is only available starting in TrueNAS-12
#apiKey:
username: root
password:
allowInsecure: true
# use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
# leave unset for auto-detection
#apiVersion: 2
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
#
# leave paths unset for auto-detection
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
# total volume name (zvol/<datasetParentName>/<pvc name>) length cannot exceed 63 chars
# https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
# standard volume naming overhead is 46 chars
# datasetParentName should therefore be 17 chars or less
datasetParentName: tank/k8s/b/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
iscsi:
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface:
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
#nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix: csi-
nameSuffix: "-clustera"
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
- targetGroupPortalGroup: 1
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1
# None, CHAP, or CHAP Mutual
targetGroupAuthType: None
# get the correct ID from the "Authorized Access" section of the UI
# only required if using Chap
targetGroupAuthGroup:
extentInsecureTpc: true
extentXenCompat: false
extentDisablePhysicalBlocksize: true
# 512, 1024, 2048, or 4096,
extentBlocksize: 512
# "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
extentRpm: "SSD"
# 0-100 (0 == ignore)
extentAvailThreshold: 0

View File

@ -0,0 +1,58 @@
driver: freenas-api-nfs
instance_id:
httpConnection:
protocol: http
host: server address
port: 80
# use only 1 of apiKey or username/password
# if both are present, apiKey is preferred
# apiKey is only available starting in TrueNAS-12
#apiKey:
username: root
password:
allowInsecure: true
# use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
# leave unset for auto-detection
#apiVersion: 2
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
#
# leave paths unset for auto-detection
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
#datasetPermissionsAcls:
#- "-m everyone@:full_set:allow"
#- "-m u:kube:full_set:allow"
nfs:
shareHost: server address
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -0,0 +1,77 @@
driver: freenas-api-smb
instance_id:
httpConnection:
protocol: http
host: server address
port: 80
# use only 1 of apiKey or username/password
# if both are present, apiKey is preferred
# apiKey is only available starting in TrueNAS-12
#apiKey:
username: root
password:
allowInsecure: true
# use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
# leave unset for auto-detection
#apiVersion: 2
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
#
# leave paths unset for auto-detection
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetProperties:
aclmode: restricted
casesensitivity: mixed
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
datasetPermissionsAcls:
- "-m everyone@:full_set:allow"
#- "-m u:kube:full_set:allow"
smb:
shareHost: server address
nameTemplate: ""
namePrefix: ""
nameSuffix: ""
# if any of the shareFoo parameters do not work with your version of FreeNAS
# simply comment the param (and use the configuration template if necessary)
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: true
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: true
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:

View File

@ -60,8 +60,9 @@ zfs:
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
iscsi:
targetPortal: "server:3261"
targetPortals: []
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface:

View File

@ -1,4 +1,4 @@
driver: freenas-nfs
driver: freenas-smb
instance_id:
httpConnection:
protocol: http

View File

@ -0,0 +1,10 @@
driver: lustre-client
instance_id:
lustre:
shareHost: server address
shareBasePath: "/some/path"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/storage"
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel

23
examples/node-common.yaml Normal file
View File

@ -0,0 +1,23 @@
# common options for the node service
node:
mount:
# should fsck be executed before mounting the fs
checkFilesystem:
xfs:
enabled: false
customOptions: []
ext4:
enabled: false
customOptions: []
customFilesystemOptions: []
format:
xfs:
customOptions: []
#- -K
# ...
ext4:
customOptions: []
#- -E
#- nodiscard
# ...

10
examples/smb-client.yaml Normal file
View File

@ -0,0 +1,10 @@
driver: smb-client
instance_id:
smb:
shareHost: server address
shareBasePath: "someshare/path"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/storage"
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel

View File

@ -0,0 +1,89 @@
driver: synology-iscsi
httpConnection:
protocol: http
host: server address
port: 5000
username: admin
password: password
allowInsecure: true
# should be unique across all installs to the same nas
session: "democratic-csi"
serialize: true
synology:
# choose the proper volume for your system
volume: /volume1
iscsi:
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface: ""
# can be whatever you would like
baseiqn: "iqn.2000-01.com.synology:csi."
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
namePrefix: ""
nameSuffix: ""
# documented below are several blocks
# pick the option appropriate for you based on what your backing fs is and desired features
# you do not need to alter dev_attribs under normal circumstances but they may be altered in advanced use-cases
lunTemplate:
# btrfs thin provisioning
type: "BLUN"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
# 3pc = Hardware-assisted data transfer
# tpu = Space reclamation
# can_snapshot = Snapshot
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
#- dev_attrib: emulate_3pc
# enable: 1
#- dev_attrib: emulate_tpu
# enable: 0
#- dev_attrib: can_snapshot
# enable: 1
# btrfs thick provisioning
# only zeroing and locking supported
#type: "BLUN_THICK"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
# ext4 thin provisioning UI sends everything with enabled=0
#type: "THIN"
# ext4 thin with advanced legacy features set
# can only alter tpu (all others are set as enabled=1)
#type: "ADV"
#dev_attribs:
#- dev_attrib: emulate_tpu
# enable: 1
# ext4 thick
# can only alter caw
#type: "FILE"
#dev_attribs:
#- dev_attrib: emulate_caw
# enable: 1
lunSnapshotTemplate:
is_locked: true
# https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot
is_app_consistent: true
targetTemplate:
auth_type: 0
max_sessions: 0

View File

@ -73,8 +73,10 @@ iscsi:
# mutual CHAP
#mutual_userid: "baz"
#mutual_password: "bar"
targetPortal: "server address"
targetPortals: []
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface: ""
# MUST ensure uniqueness

648
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@ -1,6 +1,6 @@
{
"name": "democratic-csi",
"version": "1.2.0",
"version": "1.3.0",
"description": "kubernetes csi driver framework",
"main": "bin/democratic-csi",
"scripts": {
@ -18,15 +18,18 @@
"url": "https://github.com/democratic-csi/democratic-csi.git"
},
"dependencies": {
"@grpc/grpc-js": "^1.3.6",
"@grpc/proto-loader": "^0.6.0",
"async-mutex": "^0.3.1",
"bunyan": "^1.8.15",
"grpc-uds": "^0.1.6",
"handlebars": "^4.7.7",
"js-yaml": "^4.0.0",
"lodash": "^4.17.21",
"lru-cache": "^6.0.0",
"request": "^2.88.2",
"semver": "^7.3.4",
"ssh2": "^0.8.9",
"ssh2": "^1.1.0",
"uri-js": "^4.4.1",
"uuid": "^8.3.2",
"winston": "^3.3.3",

View File

@ -0,0 +1,700 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const cp = require("child_process");
const semver = require("semver");
/**
* Crude nfs-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
class ControllerClientCommonDriver extends CsiBaseDriver {
/**
 * Build a client-style (directory provisioning) controller driver.
 *
 * Fills in default csi capability sets for the identity, controller and
 * node services when the operator has not configured them explicitly.
 * NOTE: `options` is mutated in place.
 *
 * @param {object} ctx - runtime context (logger, csiVersion, ...)
 * @param {object} options - driver config from the yaml config file
 */
constructor(ctx, options) {
  super(...arguments);

  // ensure the nested service/capability structure exists before probing it
  options = options || {};
  options.service = options.service || {};
  options.service.identity = options.service.identity || {};
  options.service.controller = options.service.controller || {};
  options.service.node = options.service.node || {};

  options.service.identity.capabilities =
    options.service.identity.capabilities || {};

  options.service.controller.capabilities =
    options.service.controller.capabilities || {};

  options.service.node.capabilities = options.service.node.capabilities || {};

  // defaults are applied only when the corresponding key is entirely absent,
  // so an operator-supplied empty list disables the defaults
  if (!("service" in options.service.identity.capabilities)) {
    this.ctx.logger.debug("setting default identity service caps");

    options.service.identity.capabilities.service = [
      //"UNKNOWN",
      "CONTROLLER_SERVICE",
      //"VOLUME_ACCESSIBILITY_CONSTRAINTS"
    ];
  }

  if (!("volume_expansion" in options.service.identity.capabilities)) {
    this.ctx.logger.debug("setting default identity volume_expansion caps");

    options.service.identity.capabilities.volume_expansion = [
      //"UNKNOWN",
      "ONLINE",
      //"OFFLINE"
    ];
  }

  if (!("rpc" in options.service.controller.capabilities)) {
    this.ctx.logger.debug("setting default controller caps");

    options.service.controller.capabilities.rpc = [
      //"UNKNOWN",
      "CREATE_DELETE_VOLUME",
      //"PUBLISH_UNPUBLISH_VOLUME",
      //"LIST_VOLUMES",
      //"GET_CAPACITY",
      "CREATE_DELETE_SNAPSHOT",
      //"LIST_SNAPSHOTS",
      "CLONE_VOLUME",
      //"PUBLISH_READONLY",
      //"EXPAND_VOLUME",
    ];

    if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
      // NOTE: this push() currently receives no arguments (all candidate
      // caps are commented out) and is therefore a no-op kept as a
      // placeholder for the 1.3.0-gated capabilities below
      options.service.controller.capabilities.rpc
        .push
        //"VOLUME_CONDITION",
        //"GET_VOLUME"
        ();
    }

    if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
      options.service.controller.capabilities.rpc.push(
        "SINGLE_NODE_MULTI_WRITER"
      );
    }
  }

  if (!("rpc" in options.service.node.capabilities)) {
    this.ctx.logger.debug("setting default node caps");

    options.service.node.capabilities.rpc = [
      //"UNKNOWN",
      "STAGE_UNSTAGE_VOLUME",
      "GET_VOLUME_STATS",
      //"EXPAND_VOLUME"
    ];

    if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
      //options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
    }

    if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
      options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
      /**
       * This is for volumes that support a mount time gid such as smb or fat
       */
      //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
    }
  }
}
assertCapabilities(capabilities) {
const driver = this;
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
let fs_types = driver.getFsTypes();
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
const valid = capabilities.every((capability) => {
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!fs_types.includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
});
return { valid, message };
}
// share paths
getShareBasePath() {
let config_key = this.getConfigKey();
let path = this.options[config_key].shareBasePath;
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing shareBasePath`
);
}
path = path.replace(/\/$/, "");
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing shareBasePath`
);
}
return path;
}
// controller paths
getControllerBasePath() {
let config_key = this.getConfigKey();
let path = this.options[config_key].controllerBasePath;
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing controllerBasePath`
);
}
path = path.replace(/\/$/, "");
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing controllerBasePath`
);
}
return path;
}
// path helpers: volumes live under <base>/v and snapshots under <base>/s,
// both relative to the share base path (node view) and the controller
// base path (controller container view)

// subdirectory (relative to a base path) that holds volume dirs
getVolumeExtraPath() {
  return "/v";
}

// subdirectory (relative to a base path) that holds snapshot dirs
getSnapshotExtraPath() {
  return "/s";
}

// share-side directory containing all volumes
getShareVolumeBasePath() {
  return this.getShareBasePath() + this.getVolumeExtraPath();
}

// share-side directory containing all snapshots
getShareSnapshotBasePath() {
  return this.getShareBasePath() + this.getSnapshotExtraPath();
}

// share-side directory for a single volume
getShareVolumePath(volume_id) {
  return this.getShareVolumeBasePath() + "/" + volume_id;
}

// share-side directory for a single snapshot
getShareSnapshotPath(snapshot_id) {
  return this.getShareSnapshotBasePath() + "/" + snapshot_id;
}

// controller-side directory containing all volumes
getControllerVolumeBasePath() {
  return this.getControllerBasePath() + this.getVolumeExtraPath();
}

// controller-side directory containing all snapshots
getControllerSnapshotBasePath() {
  return this.getControllerBasePath() + this.getSnapshotExtraPath();
}

// controller-side directory for a single volume
getControllerVolumePath(volume_id) {
  return this.getControllerVolumeBasePath() + "/" + volume_id;
}

// controller-side directory for a single snapshot
getControllerSnapshotPath(snapshot_id) {
  return this.getControllerSnapshotBasePath() + "/" + snapshot_id;
}
exec(command, args, options = {}) {
args = args || [];
let timeout;
let stdout = "";
let stderr = "";
if (options.sudo) {
args.unshift(command);
command = "sudo";
}
console.log("executing command: %s %s", command, args.join(" "));
const child = cp.spawn(command, args, options);
let didTimeout = false;
if (options && options.timeout) {
timeout = setTimeout(() => {
didTimeout = true;
child.kill(options.killSignal || "SIGTERM");
}, options.timeout);
}
return new Promise((resolve, reject) => {
child.stdout.on("data", function (data) {
stdout = stdout + data;
});
child.stderr.on("data", function (data) {
stderr = stderr + data;
});
child.on("close", function (code) {
const result = { code, stdout, stderr };
if (timeout) {
clearTimeout(timeout);
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
stripTrailingSlash(s) {
if (s.length > 1) {
return s.replace(/\/$/, "");
}
return s;
}
stripLeadingSlash(s) {
if (s.length > 1) {
return s.replace(/^\/+/, "");
}
return s;
}
async cloneDir(source_path, target_path) {
await this.exec("mkdir", ["-p", target_path]);
/**
* trailing / is important
* rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/
*/
await this.exec("rsync", [
"-a",
this.stripTrailingSlash(source_path) + "/",
this.stripTrailingSlash(target_path) + "/",
]);
}
async getAvailableSpaceAtPath(path) {
//df --output=avail /mnt/storage/
// Avail
//1481334328
const response = await this.exec("df", ["--output=avail", path]);
return response.stdout.split("\n")[1].trim();
}
async deleteDir(path) {
await this.exec("rm", ["-rf", path]);
return;
/**
* trailing / is important
* rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/
*/
await this.exec("rsync", [
"-a",
"--delete",
this.stripTrailingSlash(empty_path) + "/",
this.stripTrailingSlash(path) + "/",
]);
}
/**
* Create a volume doing in essence the following:
* 1. create directory
*
* Should return 2 parameters
* 1. `server` - host/ip of the nfs server
* 2. `share` - path of the mount shared
*
* @param {*} call
*/
async CreateVolume(call) {
const driver = this;
let config_key = this.getConfigKey();
let name = call.request.name;
let volume_content_source = call.request.volume_content_source;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greather than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
const volume_path = driver.getControllerVolumePath(name);
let response;
let source_path;
//let volume_content_source_snapshot_id;
//let volume_content_source_volume_id;
// create target dir
response = await driver.exec("mkdir", ["-p", volume_path]);
// create dataset
if (volume_content_source) {
switch (volume_content_source.type) {
// must be available when adverstising CREATE_DELETE_SNAPSHOT
// simply clone
case "snapshot":
source_path = driver.getControllerSnapshotPath(
volume_content_source.snapshot.snapshot_id
);
break;
// must be available when adverstising CLONE_VOLUME
// create snapshot first, then clone
case "volume":
source_path = driver.getControllerVolumePath(
volume_content_source.volume.volume_id
);
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`invalid volume_content_source type: ${volume_content_source.type}`
);
break;
}
driver.ctx.logger.debug("controller source path: %s", source_path);
response = await driver.cloneDir(source_path, volume_path);
}
// set mode
if (this.options[config_key].dirPermissionsMode) {
driver.ctx.logger.verbose(
"setting dir mode to: %s on dir: %s",
this.options[config_key].dirPermissionsMode,
volume_path
);
response = await driver.exec("chmod", [
this.options[config_key].dirPermissionsMode,
volume_path,
]);
}
// set ownership
if (
this.options[config_key].dirPermissionsUser ||
this.options[config_key].dirPermissionsGroup
) {
driver.ctx.logger.verbose(
"setting ownership to: %s:%s on dir: %s",
this.options[config_key].dirPermissionsUser,
this.options[config_key].dirPermissionsGroup,
volume_path
);
response = await driver.exec("chown", [
(this.options[config_key].dirPermissionsUser
? this.options[config_key].dirPermissionsUser
: "") +
":" +
(this.options[config_key].dirPermissionsGroup
? this.options[config_key].dirPermissionsGroup
: ""),
volume_path,
]);
}
let volume_context = driver.getVolumeContext(name);
volume_context["provisioner_driver"] = driver.options.driver;
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id: name,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
* Delete a volume
*
* Deleting a volume consists of the following steps:
* 1. delete directory
*
* @param {*} call
*/
async DeleteVolume(call) {
const driver = this;
let name = call.request.volume_id;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
const volume_path = driver.getControllerVolumePath(name);
await driver.deleteDir(volume_path);
return {};
}
/**
 * Volume expansion is not applicable to plain directory-backed volumes,
 * so this rpc is unconditionally unimplemented.
 *
 * @param {*} call
 * @throws {GrpcError} always, with status UNIMPLEMENTED
 */
async ControllerExpandVolume(call) {
  throw new GrpcError(
    grpc.status.UNIMPLEMENTED,
    `operation not supported by driver`
  );
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
// really capacity is not used at all with nfs in this fashion, so no reason to enable
// here even though it is technically feasible.
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
const driver = this;
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { available_capacity: 0 };
}
}
const available_capacity = await driver.getAvailableSpaceAtPath(
driver.getControllerBasePath()
);
return { available_capacity };
}
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async CreateSnapshot(call) {
const driver = this;
// both these are required
let source_volume_id = call.request.source_volume_id;
let name = call.request.name;
if (!source_volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot source_volume_id is required`
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot name is required`
);
}
driver.ctx.logger.verbose("requested snapshot name: %s", name);
let invalid_chars;
invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi);
if (invalid_chars) {
invalid_chars = String.prototype.concat(
...new Set(invalid_chars.join(""))
);
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot name contains invalid characters: ${invalid_chars}`
);
}
// https://stackoverflow.com/questions/32106243/regex-to-remove-all-non-alpha-numeric-and-replace-spaces-with/32106277
name = name.replace(/[^a-z0-9_\-:.+]+/gi, "");
driver.ctx.logger.verbose("cleansed snapshot name: %s", name);
const snapshot_id = `${source_volume_id}-${name}`;
const volume_path = driver.getControllerVolumePath(source_volume_id);
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.cloneDir(volume_path, snapshot_path);
return {
snapshot: {
/**
* The purpose of this field is to give CO guidance on how much space
* is needed to create a volume from this snapshot.
*/
size_bytes: 0,
snapshot_id,
source_volume_id: source_volume_id,
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: Math.round(new Date().getTime() / 1000),
nanos: 0,
},
ready_to_use: true,
},
};
}
/**
* In addition, if clones have been created from a snapshot, then they must
* be destroyed before the snapshot can be destroyed.
*
* @param {*} call
*/
async DeleteSnapshot(call) {
const driver = this;
const snapshot_id = call.request.snapshot_id;
if (!snapshot_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot_id is required`
);
}
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.deleteDir(snapshot_path);
return {};
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ControllerClientCommonDriver = ControllerClientCommonDriver;

View File

@ -0,0 +1,31 @@
const { ControllerClientCommonDriver } = require("../controller-client-common");
/**
* Crude lustre-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
/**
 * Minimal lustre-client driver: a volume is simply a directory on a
 * shared lustre filesystem; cloning/snapshots are rsync copies handled
 * by the common base class.
 */
class ControllerLustreClientDriver extends ControllerClientCommonDriver {
  constructor(ctx, options) {
    super(...arguments);
  }

  // name of this driver's section in the options config
  getConfigKey() {
    return "lustre";
  }

  // values handed to the node service as volume_context for mounting
  getVolumeContext(name) {
    const config_key = this.getConfigKey();
    return {
      node_attach_driver: "lustre",
      server: this.options[config_key].shareHost,
      share: this.getShareVolumePath(name),
    };
  }

  // fs types accepted during capability validation
  getFsTypes() {
    return ["lustre"];
  }
}

module.exports.ControllerLustreClientDriver = ControllerLustreClientDriver;

View File

@ -1,663 +1,30 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const cp = require("child_process");
const { Mount } = require("../../utils/mount");
const { ControllerClientCommonDriver } = require("../controller-client-common");
/**
* Crude nfs-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
class ControllerNfsClientDriver extends CsiBaseDriver {
class ControllerNfsClientDriver extends ControllerClientCommonDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
//"LIST_VOLUMES",
//"GET_CAPACITY",
"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
}
}
assertCapabilities(capabilities) {
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
const valid = capabilities.every((capability) => {
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!["nfs"].includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
});
return { valid, message };
getConfigKey() {
return "nfs";
}
// path helpers
getVolumeExtraPath() {
return "/v";
}
getSnapshotExtraPath() {
return "/s";
}
// share paths
getShareBasePath() {
let path = this.options.nfs.shareBasePath;
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing shareBasePath`
);
}
path = path.replace(/\/$/, "");
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing shareBasePath`
);
}
return path;
}
getShareVolumeBasePath() {
return this.getShareBasePath() + this.getVolumeExtraPath();
}
getShareSnapshotBasePath() {
return this.getShareBasePath() + this.getSnapshotExtraPath();
}
getShareVolumePath(volume_id) {
return this.getShareVolumeBasePath() + "/" + volume_id;
}
getShareSnapshotPath(snapshot_id) {
return this.getShareSnapshotBasePath() + "/" + snapshot_id;
}
// controller paths
getControllerBasePath() {
let path = this.options.nfs.controllerBasePath;
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing controllerBasePath`
);
}
path = path.replace(/\/$/, "");
if (!path) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing controllerBasePath`
);
}
return path;
}
getControllerVolumeBasePath() {
return this.getControllerBasePath() + this.getVolumeExtraPath();
}
getControllerSnapshotBasePath() {
return this.getControllerBasePath() + this.getSnapshotExtraPath();
}
getControllerVolumePath(volume_id) {
return this.getControllerVolumeBasePath() + "/" + volume_id;
}
getControllerSnapshotPath(snapshot_id) {
return this.getControllerSnapshotBasePath() + "/" + snapshot_id;
}
exec(command, args, options = {}) {
args = args || [];
let timeout;
let stdout = "";
let stderr = "";
if (options.sudo) {
args.unshift(command);
command = "sudo";
}
console.log("executing command: %s %s", command, args.join(" "));
const child = cp.spawn(command, args, options);
let didTimeout = false;
if (options && options.timeout) {
timeout = setTimeout(() => {
didTimeout = true;
child.kill(options.killSignal || "SIGTERM");
}, options.timeout);
}
return new Promise((resolve, reject) => {
child.stdout.on("data", function (data) {
stdout = stdout + data;
});
child.stderr.on("data", function (data) {
stderr = stderr + data;
});
child.on("close", function (code) {
const result = { code, stdout, stderr };
if (timeout) {
clearTimeout(timeout);
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
stripTrailingSlash(s) {
if (s.length > 1) {
return s.replace(/\/$/, "");
}
return s;
}
async cloneDir(source_path, target_path) {
await this.exec("mkdir", ["-p", target_path]);
/**
* trailing / is important
* rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/
*/
await this.exec("rsync", [
"-a",
this.stripTrailingSlash(source_path) + "/",
this.stripTrailingSlash(target_path) + "/",
]);
}
async getAvailableSpaceAtPath(path) {
//df --output=avail /mnt/storage/
// Avail
//1481334328
const response = await this.exec("df", ["--output=avail", path]);
return response.stdout.split("\n")[1].trim();
}
async deleteDir(path) {
await this.exec("rm", ["-rf", path]);
return;
/**
* trailing / is important
* rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/
*/
await this.exec("rsync", [
"-a",
"--delete",
this.stripTrailingSlash(empty_path) + "/",
this.stripTrailingSlash(path) + "/",
]);
}
/**
* Create a volume doing in essence the following:
* 1. create directory
*
* Should return 2 parameters
* 1. `server` - host/ip of the nfs server
* 2. `share` - path of the mount shared
*
* @param {*} call
*/
async CreateVolume(call) {
getVolumeContext(name) {
const driver = this;
let name = call.request.name;
let volume_content_source = call.request.volume_content_source;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greather than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
const volume_path = driver.getControllerVolumePath(name);
let response;
let source_path;
//let volume_content_source_snapshot_id;
//let volume_content_source_volume_id;
// create target dir
response = await driver.exec("mkdir", ["-p", volume_path]);
// create dataset
if (volume_content_source) {
switch (volume_content_source.type) {
// must be available when adverstising CREATE_DELETE_SNAPSHOT
// simply clone
case "snapshot":
source_path = driver.getControllerSnapshotPath(
volume_content_source.snapshot.snapshot_id
);
break;
// must be available when adverstising CLONE_VOLUME
// create snapshot first, then clone
case "volume":
source_path = driver.getControllerVolumePath(
volume_content_source.volume.volume_id
);
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`invalid volume_content_source type: ${volume_content_source.type}`
);
break;
}
driver.ctx.logger.debug("controller source path: %s", source_path);
response = await driver.cloneDir(source_path, volume_path);
}
// set mode
if (this.options.nfs.dirPermissionsMode) {
driver.ctx.logger.verbose(
"setting dir mode to: %s on dir: %s",
this.options.nfs.dirPermissionsMode,
volume_path
);
response = await driver.exec("chmod", [
this.options.nfs.dirPermissionsMode,
volume_path,
]);
}
// set ownership
if (
this.options.nfs.dirPermissionsUser ||
this.options.nfs.dirPermissionsGroup
) {
driver.ctx.logger.verbose(
"setting ownership to: %s:%s on dir: %s",
this.options.nfs.dirPermissionsUser,
this.options.nfs.dirPermissionsGroup,
volume_path
);
response = await driver.exec("chown", [
(this.options.nfs.dirPermissionsUser
? this.options.nfs.dirPermissionsUser
: "") +
":" +
(this.options.nfs.dirPermissionsGroup
? this.options.nfs.dirPermissionsGroup
: ""),
volume_path,
]);
}
let volume_context = {
const config_key = driver.getConfigKey();
return {
node_attach_driver: "nfs",
server: this.options.nfs.shareHost,
server: this.options[config_key].shareHost,
share: driver.getShareVolumePath(name),
};
volume_context["provisioner_driver"] = driver.options.driver;
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id: name,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
* Delete a volume
*
* Deleting a volume consists of the following steps:
* 1. delete directory
*
* @param {*} call
*/
async DeleteVolume(call) {
const driver = this;
let name = call.request.volume_id;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
const volume_path = driver.getControllerVolumePath(name);
await driver.deleteDir(volume_path);
return {};
}
/**
*
* @param {*} call
*/
async ControllerExpandVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
// really capacity is not used at all with nfs in this fashion, so no reason to enable
// here even though it is technically feasible.
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
const driver = this;
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { available_capacity: 0 };
}
}
const available_capacity = await driver.getAvailableSpaceAtPath(
driver.getControllerBasePath()
);
return { available_capacity };
}
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async CreateSnapshot(call) {
const driver = this;
// both these are required
let source_volume_id = call.request.source_volume_id;
let name = call.request.name;
if (!source_volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot source_volume_id is required`
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot name is required`
);
}
driver.ctx.logger.verbose("requested snapshot name: %s", name);
let invalid_chars;
invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi);
if (invalid_chars) {
invalid_chars = String.prototype.concat(
...new Set(invalid_chars.join(""))
);
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot name contains invalid characters: ${invalid_chars}`
);
}
// https://stackoverflow.com/questions/32106243/regex-to-remove-all-non-alpha-numeric-and-replace-spaces-with/32106277
name = name.replace(/[^a-z0-9_\-:.+]+/gi, "");
driver.ctx.logger.verbose("cleansed snapshot name: %s", name);
const snapshot_id = `${source_volume_id}-${name}`;
const volume_path = driver.getControllerVolumePath(source_volume_id);
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.cloneDir(volume_path, snapshot_path);
return {
snapshot: {
/**
* The purpose of this field is to give CO guidance on how much space
* is needed to create a volume from this snapshot.
*/
size_bytes: 0,
snapshot_id,
source_volume_id: source_volume_id,
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: Math.round(new Date().getTime() / 1000),
nanos: 0,
},
ready_to_use: true,
},
};
}
/**
* In addition, if clones have been created from a snapshot, then they must
* be destroyed before the snapshot can be destroyed.
*
* @param {*} call
*/
async DeleteSnapshot(call) {
const driver = this;
const snapshot_id = call.request.snapshot_id;
if (!snapshot_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot_id is required`
);
}
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.deleteDir(snapshot_path);
return {};
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
getFsTypes() {
return ["nfs"];
}
}

View File

@ -0,0 +1,31 @@
const { ControllerClientCommonDriver } = require("../controller-client-common");
/**
* Crude smb-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
/**
 * Minimal smb-client driver: a volume is simply a directory on a
 * shared smb filesystem; cloning/snapshots are rsync copies handled by
 * the common base class.
 */
class ControllerSmbClientDriver extends ControllerClientCommonDriver {
  constructor(ctx, options) {
    super(...arguments);
  }

  // name of this driver's section in the options config
  getConfigKey() {
    return "smb";
  }

  // values handed to the node service as volume_context for mounting
  getVolumeContext(name) {
    const config_key = this.getConfigKey();
    return {
      node_attach_driver: "smb",
      server: this.options[config_key].shareHost,
      // smb share paths are addressed without a leading slash
      share: this.stripLeadingSlash(this.getShareVolumePath(name)),
    };
  }

  // fs types accepted during capability validation
  getFsTypes() {
    return ["cifs"];
  }
}

module.exports.ControllerSmbClientDriver = ControllerSmbClientDriver;

View File

@ -0,0 +1,527 @@
const request = require("request");
const Mutex = require("async-mutex").Mutex;
const USER_AGENT = "democratic-csi";
/**
 * Thin HTTP client for the Synology DSM web API (webapi/*.cgi).
 *
 * Handles sid-based session authentication (SYNO.API.Auth), optional
 * serialization of API calls via a mutex, and surfaces Synology's
 * success/error envelope as resolved/rejected promises.
 */
class SynologyHttpClient {
  /**
   * @param {object} options - connection settings; fields read by this
   *   class: protocol, host, port, username, password, session,
   *   serialize, allowInsecure
   */
  constructor(options = {}) {
    // deep copy so later mutation by the caller cannot affect the client
    this.options = JSON.parse(JSON.stringify(options));
    this.logger = console;
    // serializes login so only one auth round-trip happens at a time
    this.doLoginMutex = new Mutex();
    // optionally serializes every API call (when options.serialize is set)
    this.apiSerializeMutex = new Mutex();

    // NOTE(review): dead debug harness (condition is always false) that
    // would periodically wipe the cached sid to exercise re-login
    if (false) {
      setInterval(() => {
        console.log("WIPING OUT SYNOLOGY SID");
        this.sid = null;
      }, 5 * 1000);
    }
  }

  /**
   * Authenticate against SYNO.API.Auth and cache the session id.
   * Subsequent calls return the cached sid without a network round-trip.
   *
   * @returns {Promise<string>} session id (sid)
   */
  async login() {
    if (!this.sid) {
      const data = {
        api: "SYNO.API.Auth",
        version: "2",
        method: "login",
        account: this.options.username,
        passwd: this.options.password,
        session: this.options.session,
        format: "sid",
      };

      let response = await this.do_request("GET", "auth.cgi", data);
      this.sid = response.body.data.sid;
    }

    return this.sid;
  }

  /**
   * Debug-log a request/response pair.
   *
   * NOTE(review): when `error` is set (transport failure) `response`
   * may be undefined, in which case the statusCode/headers accesses
   * below would throw — confirm against the `request` library contract.
   */
  log_response(error, response, body, options) {
    this.logger.debug("SYNOLOGY HTTP REQUEST: " + JSON.stringify(options));
    this.logger.debug("SYNOLOGY HTTP ERROR: " + error);
    this.logger.debug("SYNOLOGY HTTP STATUS: " + response.statusCode);
    this.logger.debug(
      "SYNOLOGY HTTP HEADERS: " + JSON.stringify(response.headers)
    );
    this.logger.debug("SYNOLOGY HTTP BODY: " + JSON.stringify(body));
  }

  /**
   * Perform a DSM web API request.
   *
   * Non-auth requests first acquire a sid (logging in if needed) and,
   * when options.serialize is enabled, hold apiSerializeMutex for the
   * duration of the request setup.
   *
   * Rejects on transport error, non-2xx status, or a Synology envelope
   * with success === false (error code 119 additionally invalidates the
   * cached sid so the next call re-authenticates).
   *
   * @param {string} method HTTP method ("GET" uses querystring encoding)
   * @param {string} path cgi path under /webapi/ (e.g. "entry.cgi")
   * @param {object} data API parameters; _sid is injected for non-auth calls
   * @param {object} options invoke options; use_form_encoded switches the
   *   body encoding for non-GET requests
   * @returns {Promise<object>} the full response object
   */
  async do_request(method, path, data = {}, options = {}) {
    const client = this;
    const isAuth = data.api == "SYNO.API.Auth" && data.method == "login";
    let sid;
    let apiMutexRelease;
    if (!isAuth) {
      sid = await this.doLoginMutex.runExclusive(async () => {
        return await this.login();
      });
    }

    const invoke_options = options;

    if (!isAuth) {
      if (this.options.serialize) {
        apiMutexRelease = await this.apiSerializeMutex.acquire();
      }
    }

    return new Promise((resolve, reject) => {
      if (!isAuth) {
        data._sid = sid;
      }

      // request-library options; shadows the outer `options` parameter
      const options = {
        method: method,
        url: `${this.options.protocol}://${this.options.host}:${this.options.port}/webapi/${path}`,
        headers: {
          Accept: "application/json",
          "User-Agent": USER_AGENT,
          "Content-Type": invoke_options.use_form_encoded
            ? "application/x-www-form-urlencoded"
            : "application/json",
        },
        json: invoke_options.use_form_encoded ? false : true,
        agentOptions: {
          // allowInsecure disables TLS certificate verification
          rejectUnauthorized: !!!client.options.allowInsecure,
        },
      };

      switch (method) {
        case "GET":
          // arrays/booleans must be JSON-stringified for the querystring
          let qsData = JSON.parse(JSON.stringify(data));
          for (let p in qsData) {
            if (Array.isArray(qsData[p]) || typeof qsData[p] == "boolean") {
              qsData[p] = JSON.stringify(qsData[p]);
            }
          }
          options.qs = qsData;
          break;
        default:
          if (invoke_options.use_form_encoded) {
            //options.body = URLSearchParams(data);
            options.form = data;
          } else {
            options.body = data;
          }
          break;
      }

      try {
        request(options, function (error, response, body) {
          client.log_response(...arguments, options);

          // NOTE(review): reject() without return — later resolve/reject
          // calls are no-ops once the promise settles, but the remaining
          // statements in this callback still execute
          if (error) {
            reject(error);
          }

          // parse JSON bodies the library left as raw strings
          if (
            typeof response.body !== "object" &&
            response.body !== null &&
            response.headers["content-type"] &&
            response.headers["content-type"].includes("application/json")
          ) {
            response.body = JSON.parse(response.body);
          }

          if (response.statusCode > 299 || response.statusCode < 200) {
            reject(response);
          }

          if (response.body.success === false) {
            // remove invalid sid
            if (response.body.error.code == 119 && sid == client.sid) {
              client.sid = null;
            }
            reject(response);
          }

          resolve(response);
        });
      } finally {
        // release the serialization mutex as soon as the request is issued
        if (typeof apiMutexRelease == "function") {
          apiMutexRelease();
        }
      }
    });
  }

  /**
   * Look up a LUN's uuid by its name; undefined when not found.
   */
  async GetLunUUIDByName(name) {
    const lun_list = {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "list",
    };

    let response = await this.do_request("GET", "entry.cgi", lun_list);
    let lun = response.body.data.luns.find((i) => {
      return i.name == name;
    });

    if (lun) {
      return lun.uuid;
    }
  }

  /**
   * Look up a LUN's numeric id by its name; undefined when not found.
   */
  async GetLunIDByName(name) {
    const lun_list = {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "list",
    };

    let response = await this.do_request("GET", "entry.cgi", lun_list);
    let lun = response.body.data.luns.find((i) => {
      return i.name == name;
    });

    if (lun) {
      return lun.lun_id;
    }
  }

  /**
   * Fetch a full LUN record by numeric id; undefined when not found.
   */
  async GetLunByID(lun_id) {
    const lun_list = {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "list",
    };

    let response = await this.do_request("GET", "entry.cgi", lun_list);
    let lun = response.body.data.luns.find((i) => {
      return i.lun_id == lun_id;
    });

    if (lun) {
      return lun;
    }
  }

  /**
   * Fetch a full LUN record by name; undefined when not found.
   */
  async GetLunByName(name) {
    const lun_list = {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "list",
    };

    let response = await this.do_request("GET", "entry.cgi", lun_list);
    let lun = response.body.data.luns.find((i) => {
      return i.name == name;
    });

    if (lun) {
      return lun;
    }
  }

  /**
   * Find a LUN snapshot by its description; undefined when not found.
   */
  async GetSnapshotByLunIDAndName(lun_id, name) {
    const get_snapshot_info = {
      lid: lun_id, //check?
      api: "SYNO.Core.Storage.iSCSILUN",
      method: "load_snapshot",
      version: 1,
    };

    let response = await this.do_request("GET", "entry.cgi", get_snapshot_info);
    if (response.body.data) {
      let snapshot = response.body.data.find((i) => {
        return i.desc == name;
      });

      if (snapshot) {
        return snapshot;
      }
    }
  }

  /**
   * Find a LUN snapshot by its uuid; undefined when not found.
   */
  async GetSnapshotByLunIDAndSnapshotUUID(lun_id, snapshot_uuid) {
    const get_snapshot_info = {
      lid: lun_id, //check?
      api: "SYNO.Core.Storage.iSCSILUN",
      method: "load_snapshot",
      version: 1,
    };

    let response = await this.do_request("GET", "entry.cgi", get_snapshot_info);
    if (response.body.data) {
      let snapshot = response.body.data.find((i) => {
        return i.uuid == snapshot_uuid;
      });

      if (snapshot) {
        return snapshot;
      }
    }
  }

  /**
   * Delete a LUN snapshot by uuid. Return value of the API call is
   * intentionally ignored.
   */
  async DeleteSnapshot(snapshot_uuid) {
    const iscsi_snapshot_delete = {
      api: "SYNO.Core.ISCSI.LUN",
      method: "delete_snapshot",
      version: 1,
      snapshot_uuid: JSON.stringify(snapshot_uuid), // snapshot_id
      deleted_by: "democratic_csi", // ?
    };

    let response = await this.do_request(
      "GET",
      "entry.cgi",
      iscsi_snapshot_delete
    );
    // return?
  }

  /**
   * Fetch storage-volume info for a DSM volume path (e.g. "/volume1").
   */
  async GetVolumeInfo(volume_path) {
    let data = {
      api: "SYNO.Core.Storage.Volume",
      method: "get",
      version: "1",
      //volume_path: "/volume1",
      volume_path,
    };

    return await this.do_request("GET", "entry.cgi", data);
  }

  /**
   * Find an iscsi target by numeric target_id; undefined when not found.
   */
  async GetTargetByTargetID(target_id) {
    let targets = await this.ListTargets();
    let target = targets.find((i) => {
      return i.target_id == target_id;
    });

    return target;
  }

  /**
   * Find an iscsi target by IQN; undefined when not found.
   */
  async GetTargetByIQN(iqn) {
    let targets = await this.ListTargets();
    let target = targets.find((i) => {
      return i.iqn == iqn;
    });

    return target;
  }

  /**
   * List all iscsi targets including mapped luns, status, acls and
   * connected sessions.
   */
  async ListTargets() {
    const iscsi_target_list = {
      api: "SYNO.Core.ISCSI.Target",
      version: "1",
      path: "entry.cgi",
      method: "list",
      additional: '["mapped_lun", "status", "acls", "connected_sessions"]',
    };
    let response = await this.do_request("GET", "entry.cgi", iscsi_target_list);
    return response.body.data.targets;
  }

  /**
   * Create a LUN and return its uuid. Idempotent: error code 18990538
   * (name already exists) resolves to the existing LUN's uuid.
   */
  async CreateLun(data = {}) {
    let response;

    let iscsi_lun_create = Object.assign({}, data, {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "create",
    });

    const lun_list = {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "list",
    };

    try {
      response = await this.do_request("GET", "entry.cgi", iscsi_lun_create);
      return response.body.data.uuid;
    } catch (err) {
      if ([18990538].includes(err.body.error.code)) {
        response = await this.do_request("GET", "entry.cgi", lun_list);
        let lun = response.body.data.luns.find((i) => {
          return i.name == iscsi_lun_create.name;
        });

        return lun.uuid;
      } else {
        throw err;
      }
    }
  }

  /**
   * Map a LUN to one or more targets (data: uuid, target_ids).
   */
  async MapLun(data = {}) {
    // this is mapping from the perspective of the lun
    let iscsi_target_map = Object.assign({}, data, {
      api: "SYNO.Core.ISCSI.LUN",
      method: "map_target",
      version: "1",
    });
    iscsi_target_map.uuid = JSON.stringify(iscsi_target_map.uuid);
    iscsi_target_map.target_ids = JSON.stringify(iscsi_target_map.target_ids);

    // this is mapping from the perspective of the target
    /*
    iscsi_target_map = Object.assign(data, {
      api: "SYNO.Core.ISCSI.Target",
      method: "map_lun",
      version: "1",
    });
    iscsi_target_map.lun_uuids = JSON.stringify(iscsi_target_map.lun_uuids);
    */

    await this.do_request("GET", "entry.cgi", iscsi_target_map);
  }

  /**
   * Delete a LUN by uuid (passed via the plural `uuids` parameter).
   */
  async DeleteLun(uuid) {
    uuid = uuid || "";
    let iscsi_lun_delete = {
      api: "SYNO.Core.ISCSI.LUN",
      method: "delete",
      version: 1,
      //uuid: uuid,
      uuid: JSON.stringify(""),
      uuids: JSON.stringify([uuid]),
      //is_soft_feas_ignored: false,
      is_soft_feas_ignored: true,
      //feasibility_precheck: true,
    };

    await this.do_request("GET", "entry.cgi", iscsi_lun_delete);
  }

  /**
   * Delete every LUN on the device, one at a time. Destructive —
   * intended for cleanup/testing scenarios.
   */
  async DeleteAllLuns() {
    const lun_list = {
      api: "SYNO.Core.ISCSI.LUN",
      version: "1",
      method: "list",
    };

    let response = await this.do_request("GET", "entry.cgi", lun_list);
    for (let lun of response.body.data.luns) {
      await this.DeleteLun(lun.uuid);
    }
  }

  /**
   * Take a snapshot of a LUN (data: src_lun_uuid plus snapshot params).
   */
  async CreateSnapshot(data) {
    data = Object.assign({}, data, {
      api: "SYNO.Core.ISCSI.LUN",
      method: "take_snapshot",
      version: 1,
    });

    data.src_lun_uuid = JSON.stringify(data.src_lun_uuid);

    return await this.do_request("GET", "entry.cgi", data);
  }

  /**
   * Create an iscsi target and return its target_id. Idempotent: error
   * code 18990744 (IQN already exists) resolves to the existing
   * target's id via a list lookup.
   */
  async CreateTarget(data = {}) {
    let iscsi_target_create = Object.assign({}, data, {
      api: "SYNO.Core.ISCSI.Target",
      version: "1",
      method: "create",
    });

    let response;

    try {
      response = await this.do_request("GET", "entry.cgi", iscsi_target_create);

      return response.body.data.target_id;
    } catch (err) {
      if ([18990744].includes(err.body.error.code)) {
        //do lookup
        const iscsi_target_list = {
          api: "SYNO.Core.ISCSI.Target",
          version: "1",
          path: "entry.cgi",
          method: "list",
          additional: '["mapped_lun", "status", "acls", "connected_sessions"]',
        };

        response = await this.do_request("GET", "entry.cgi", iscsi_target_list);
        let target = response.body.data.targets.find((i) => {
          return i.iqn == iscsi_target_create.iqn;
        });

        if (target) {
          return target.target_id;
        } else {
          throw err;
        }
      } else {
        throw err;
      }
    }
  }

  /**
   * Delete an iscsi target by id. All errors are currently rethrown;
   * the commented-out code shows an earlier intent to tolerate
   * "non-existent target" (18990710).
   */
  async DeleteTarget(target_id) {
    const iscsi_target_delete = {
      api: "SYNO.Core.ISCSI.Target",
      method: "delete",
      version: "1",
      path: "entry.cgi",
    };

    try {
      await this.do_request(
        "GET",
        "entry.cgi",
        Object.assign({}, iscsi_target_delete, {
          target_id: JSON.stringify(String(target_id || "")),
        })
      );
    } catch (err) {
      /**
       * 18990710 = non-existant
       */
      //if (![18990710].includes(err.body.error.code)) {
      throw err;
      //}
    }
  }

  /**
   * Resize a LUN to `size` bytes.
   */
  async ExpandISCSILun(uuid, size) {
    const iscsi_lun_extend = {
      api: "SYNO.Core.ISCSI.LUN",
      method: "set",
      version: 1,
    };

    return await this.do_request(
      "GET",
      "entry.cgi",
      Object.assign({}, iscsi_lun_extend, {
        uuid: JSON.stringify(uuid),
        new_size: size,
      })
    );
  }

  /**
   * Clone an existing LUN into a new LUN (same pool).
   */
  async CreateClonedVolume(src_lun_uuid, dst_lun_name) {
    const create_cloned_volume = {
      api: "SYNO.Core.ISCSI.LUN",
      version: 1,
      method: "clone",
      src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid
      dst_lun_name: dst_lun_name, // dst lun name
      is_same_pool: true, // always true? string?
      clone_type: "democratic-csi", // check
    };
    return await this.do_request("GET", "entry.cgi", create_cloned_volume);
  }

  /**
   * Create a new LUN from an existing snapshot of another LUN.
   */
  async CreateVolumeFromSnapshot(src_lun_uuid, snapshot_uuid, cloned_lun_name) {
    const create_volume_from_snapshot = {
      api: "SYNO.Core.ISCSI.LUN",
      version: 1,
      method: "clone_snapshot",
      src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid, snapshot id?
      snapshot_uuid: JSON.stringify(snapshot_uuid), // shaptop uuid
      cloned_lun_name: cloned_lun_name, // cloned lun name
      clone_type: "democratic-csi", // check
    };
    return await this.do_request(
      "GET",
      "entry.cgi",
      create_volume_from_snapshot
    );
  }
}

module.exports.SynologyHttpClient = SynologyHttpClient;

View File

@ -0,0 +1,867 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const SynologyHttpClient = require("./http").SynologyHttpClient;
const semver = require("semver");
const sleep = require("../../utils/general").sleep;
/**
*
* Driver to provision storage on a synology device
*
*/
class ControllerSynologyDriver extends CsiBaseDriver {
/**
 * Normalize driver options and install default identity/controller/node
 * capability sets when the configuration does not specify them.
 *
 * @param {*} ctx runtime context (logger, csiVersion, args)
 * @param {object} options driver configuration
 */
constructor(ctx, options) {
  super(...arguments);
  // ensure every nested service/capability object exists before use
  options = options || {};
  options.service = options.service || {};
  options.service.identity = options.service.identity || {};
  options.service.controller = options.service.controller || {};
  options.service.node = options.service.node || {};

  options.service.identity.capabilities =
    options.service.identity.capabilities || {};

  options.service.controller.capabilities =
    options.service.controller.capabilities || {};

  options.service.node.capabilities = options.service.node.capabilities || {};

  // "filesystem" (nfs/smb) vs "volume" (iscsi); affects node caps below
  const driverResourceType = this.getDriverResourceType();

  if (!("service" in options.service.identity.capabilities)) {
    this.ctx.logger.debug("setting default identity service caps");

    options.service.identity.capabilities.service = [
      //"UNKNOWN",
      "CONTROLLER_SERVICE",
      //"VOLUME_ACCESSIBILITY_CONSTRAINTS"
    ];
  }

  if (!("volume_expansion" in options.service.identity.capabilities)) {
    this.ctx.logger.debug("setting default identity volume_expansion caps");

    options.service.identity.capabilities.volume_expansion = [
      //"UNKNOWN",
      "ONLINE",
      //"OFFLINE"
    ];
  }

  if (!("rpc" in options.service.controller.capabilities)) {
    this.ctx.logger.debug("setting default controller caps");

    options.service.controller.capabilities.rpc = [
      //"UNKNOWN",
      "CREATE_DELETE_VOLUME",
      //"PUBLISH_UNPUBLISH_VOLUME",
      //"LIST_VOLUMES",
      "GET_CAPACITY",
      "CREATE_DELETE_SNAPSHOT",
      //"LIST_SNAPSHOTS",
      "CLONE_VOLUME",
      //"PUBLISH_READONLY",
      "EXPAND_VOLUME",
    ];

    if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
      // NOTE(review): zero-argument push — the candidate capability
      // names are commented out, leaving a deliberate no-op placeholder
      options.service.controller.capabilities.rpc
        .push
        //"VOLUME_CONDITION",
        //"GET_VOLUME" (would need to properly handle volume_content_source)
        ();
    }

    if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
      options.service.controller.capabilities.rpc.push(
        "SINGLE_NODE_MULTI_WRITER"
      );
    }
  }

  if (!("rpc" in options.service.node.capabilities)) {
    this.ctx.logger.debug("setting default node caps");

    options.service.node.capabilities.rpc = [
      //"UNKNOWN",
      "STAGE_UNSTAGE_VOLUME",
      "GET_VOLUME_STATS",
      //"EXPAND_VOLUME",
    ];

    // only block-style (iscsi) volumes support node-side expansion
    if (driverResourceType == "volume") {
      options.service.node.capabilities.rpc.push("EXPAND_VOLUME");
    }

    if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
      //options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
    }

    if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
      options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
      /**
       * This is for volumes that support a mount time gid such as smb or fat
       */
      //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
    }
  }
}
async getHttpClient() {
if (!this.httpClient) {
this.httpClient = new SynologyHttpClient(this.options.httpConnection);
}
return this.httpClient;
}
getDriverResourceType() {
switch (this.options.driver) {
case "synology-nfs":
case "synology-smb":
return "filesystem";
case "synology-iscsi":
return "volume";
default:
throw new Error("unknown driver: " + this.ctx.args.driver);
}
}
getDriverShareType() {
switch (this.options.driver) {
case "synology-nfs":
return "nfs";
case "synology-smb":
return "smb";
case "synology-iscsi":
return "iscsi";
default:
throw new Error("unknown driver: " + this.ctx.args.driver);
}
}
buildIscsiName(name) {
let iscsiName = name;
if (this.options.iscsi.namePrefix) {
iscsiName = this.options.iscsi.namePrefix + iscsiName;
}
if (this.options.iscsi.nameSuffix) {
iscsiName += this.options.iscsi.nameSuffix;
}
return iscsiName.toLowerCase();
}
/**
 * Validate the requested csi volume capabilities against what the
 * configured driver (filesystem vs volume) can serve.
 *
 * @param {array} capabilities list of csi capability objects
 * @returns {object} { valid: boolean, message: string|null }
 */
assertCapabilities(capabilities) {
  const driverResourceType = this.getDriverResourceType();
  this.ctx.logger.verbose("validating capabilities: %j", capabilities);

  let message = null;
  //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
  const valid = capabilities.every((capability) => {
    switch (driverResourceType) {
      case "filesystem":
        // shares are always mounted, never attached as raw block devices
        if (capability.access_type != "mount") {
          message = `invalid access_type ${capability.access_type}`;
          return false;
        }

        if (
          capability.mount.fs_type &&
          !["nfs", "cifs"].includes(capability.mount.fs_type)
        ) {
          message = `invalid fs_type ${capability.mount.fs_type}`;
          return false;
        }

        if (
          ![
            "UNKNOWN",
            "SINGLE_NODE_WRITER",
            "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
            "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
            "SINGLE_NODE_READER_ONLY",
            "MULTI_NODE_READER_ONLY",
            "MULTI_NODE_SINGLE_WRITER",
            "MULTI_NODE_MULTI_WRITER",
          ].includes(capability.access_mode.mode)
        ) {
          message = `invalid access_mode, ${capability.access_mode.mode}`;
          return false;
        }

        return true;
      case "volume":
        // block devices may be mounted with a filesystem or consumed raw
        if (capability.access_type == "mount") {
          if (
            capability.mount.fs_type &&
            !["ext3", "ext4", "ext4dev", "xfs"].includes(
              capability.mount.fs_type
            )
          ) {
            message = `invalid fs_type ${capability.mount.fs_type}`;
            return false;
          }
        }

        if (
          ![
            "UNKNOWN",
            "SINGLE_NODE_WRITER",
            "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
            "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
            "SINGLE_NODE_READER_ONLY",
            "MULTI_NODE_READER_ONLY",
            // note: MULTI_NODE_MULTI_WRITER intentionally absent for volumes
            "MULTI_NODE_SINGLE_WRITER",
          ].includes(capability.access_mode.mode)
        ) {
          message = `invalid access_mode, ${capability.access_mode.mode}`;
          return false;
        }

        return true;
      // NOTE(review): no default case — an unknown resource type makes the
      // callback return undefined (falsy), i.e. valid=false with message=null
    }
  });

  return { valid, message };
}
/**
*
* CreateVolume
*
* @param {*} call
*/
async CreateVolume(call) {
const driver = this;
const httpClient = await driver.getHttpClient();
let name = call.request.name;
let volume_content_source = call.request.volume_content_source;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greather than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
let volume_context = {};
switch (driver.getDriverShareType()) {
case "nfs":
// TODO: create volume here
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
break;
case "smb":
// TODO: create volume here
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
break;
case "iscsi":
let iscsiName = driver.buildIscsiName(name);
let data;
let target;
let lun_mapping;
let lun_uuid;
let existingLun;
if (volume_content_source) {
let src_lun_uuid;
let src_lun_id;
switch (volume_content_source.type) {
case "snapshot":
let parts = volume_content_source.snapshot.snapshot_id.split("/");
src_lun_id = parts[2];
let snapshot_uuid = parts[3];
let src_lun = await httpClient.GetLunByID(src_lun_id);
src_lun_uuid = src_lun.uuid;
existingLun = await httpClient.GetLunByName(iscsiName);
if (!existingLun) {
await httpClient.CreateVolumeFromSnapshot(
src_lun_uuid,
snapshot_uuid,
iscsiName
);
}
break;
case "volume":
existingLun = await httpClient.GetLunByName(iscsiName);
if (!existingLun) {
let srcLunName = driver.buildIscsiName(
volume_content_source.volume.volume_id
);
src_lun_uuid = await httpClient.GetLunUUIDByName(srcLunName);
await httpClient.CreateClonedVolume(src_lun_uuid, iscsiName);
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`invalid volume_content_source type: ${volume_content_source.type}`
);
break;
}
// resize to requested amount
let lun = await httpClient.GetLunByName(iscsiName);
lun_uuid = lun.uuid;
if (lun.size < capacity_bytes) {
await httpClient.ExpandISCSILun(lun_uuid, capacity_bytes);
}
} else {
// create lun
data = Object.assign({}, driver.options.iscsi.lunTemplate, {
name: iscsiName,
location: driver.options.synology.volume,
size: capacity_bytes,
});
lun_uuid = await httpClient.CreateLun(data);
}
// create target
let iqn = driver.options.iscsi.baseiqn + iscsiName;
data = Object.assign({}, driver.options.iscsi.targetTemplate, {
name: iscsiName,
iqn,
});
let target_id = await httpClient.CreateTarget(data);
//target = await httpClient.GetTargetByTargetID(target_id);
target = await httpClient.GetTargetByIQN(iqn);
if (!target) {
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to lookup target: ${iqn}`
);
}
target_id = target.target_id;
// check if mapping of lun <-> target already exists
lun_mapping = target.mapped_luns.find((lun) => {
return lun.lun_uuid == lun_uuid;
});
// create mapping if not present already
if (!lun_mapping) {
data = {
uuid: lun_uuid,
target_ids: [target_id],
};
/*
data = {
lun_uuids: [lun_uuid],
target_id: target_id,
};
*/
await httpClient.MapLun(data);
// re-retrieve target to ensure proper lun (mapping_index) value is returned
target = await httpClient.GetTargetByTargetID(target_id);
lun_mapping = target.mapped_luns.find((lun) => {
return lun.lun_uuid == lun_uuid;
});
}
if (!lun_mapping) {
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to lookup lun_mapping_id`
);
}
volume_context = {
node_attach_driver: "iscsi",
portal: driver.options.iscsi.targetPortal || "",
portals: driver.options.iscsi.targetPortals
? driver.options.iscsi.targetPortals.join(",")
: "",
interface: driver.options.iscsi.interface || "",
iqn,
lun: lun_mapping.mapping_index,
};
break;
default:
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
break;
}
volume_context["provisioner_driver"] = driver.options.driver;
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id: name,
capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
 * DeleteVolume
 *
 * Remove the iscsi target and lun backing the given volume. Deleting a
 * volume that no longer exists is treated as success.
 *
 * @param {*} call
 */
async DeleteVolume(call) {
  const driver = this;
  const httpClient = await driver.getHttpClient();

  let name = call.request.volume_id;

  if (!name) {
    throw new GrpcError(
      grpc.status.INVALID_ARGUMENT,
      `volume_id is required`
    );
  }

  let response;

  switch (driver.getDriverShareType()) {
    case "nfs":
      // TODO: delete volume here
      throw new GrpcError(
        grpc.status.UNIMPLEMENTED,
        `operation not supported by driver`
      );
      break;
    case "smb":
      // TODO: delete volume here
      throw new GrpcError(
        grpc.status.UNIMPLEMENTED,
        `operation not supported by driver`
      );
      break;
    case "iscsi":
      //await httpClient.DeleteAllLuns();
      let iscsiName = driver.buildIscsiName(name);
      let iqn = driver.options.iscsi.baseiqn + iscsiName;
      // remove the target first, then the lun it exposed
      let target = await httpClient.GetTargetByIQN(iqn);
      if (target) {
        await httpClient.DeleteTarget(target.target_id);
      }
      let lun_uuid = await httpClient.GetLunUUIDByName(iscsiName);
      if (lun_uuid) {
        // this is an async process where a success is returned but delete is happening still behind the scenes
        // therefore we continue to search for the lun after delete success call to ensure full deletion
        await httpClient.DeleteLun(lun_uuid);
        //let settleEnabled = driver.options.api.lunDelete.settleEnabled;
        let settleEnabled = true; // NOTE(review): hard-coded pending a config option (see commented lines)
        if (settleEnabled) {
          let currentCheck = 0;
          /*
          let settleMaxRetries =
            driver.options.api.lunDelete.settleMaxRetries || 6;
          let settleSeconds = driver.options.api.lunDelete.settleSeconds || 5;
          */
          let settleMaxRetries = 6;
          let settleSeconds = 5;
          let waitTimeBetweenChecks = settleSeconds * 1000;
          // poll until the lun is actually gone or retries are exhausted
          await sleep(waitTimeBetweenChecks);
          lun_uuid = await httpClient.GetLunUUIDByName(iscsiName);
          while (currentCheck <= settleMaxRetries && lun_uuid) {
            currentCheck++;
            await sleep(waitTimeBetweenChecks);
            lun_uuid = await httpClient.GetLunUUIDByName(iscsiName);
          }
          // still present after the settle window -> surface the failure
          if (lun_uuid) {
            throw new GrpcError(
              grpc.status.UNKNOWN,
              `failed to remove lun: ${lun_uuid}`
            );
          }
        }
      }
      break;
    default:
      throw new GrpcError(
        grpc.status.UNIMPLEMENTED,
        `operation not supported by driver`
      );
      break;
  }

  return {};
}
/**
*
* @param {*} call
*/
async ControllerExpandVolume(call) {
const driver = this;
const httpClient = await driver.getHttpClient();
let name = call.request.volume_id;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`required_bytes is greather than limit_bytes`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
let node_expansion_required = false;
let response;
switch (driver.getDriverShareType()) {
case "nfs":
// TODO: expand volume here
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
break;
case "smb":
// TODO: expand volume here
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
break;
case "iscsi":
node_expansion_required = true;
let iscsiName = driver.buildIscsiName(name);
response = await httpClient.GetLunUUIDByName(iscsiName);
await httpClient.ExpandISCSILun(response, capacity_bytes);
break;
default:
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
break;
}
return {
capacity_bytes,
node_expansion_required,
};
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
const driver = this;
const httpClient = await driver.getHttpClient();
if (!driver.options.synology.volume) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing volume`
);
}
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { available_capacity: 0 };
}
}
let response = await httpClient.GetVolumeInfo(
driver.options.synology.volume
);
return { available_capacity: response.body.data.volume.size_free_byte };
}
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
 * Take a snapshot of the lun backing source_volume_id. Returns the existing
 * snapshot if one with the same name was already taken (idempotent create).
 *
 * @param {*} call
 */
async CreateSnapshot(call) {
  const driver = this;
  const httpClient = await driver.getHttpClient();

  // both these are required
  let source_volume_id = call.request.source_volume_id;
  let name = call.request.name;

  if (!source_volume_id) {
    throw new GrpcError(
      grpc.status.INVALID_ARGUMENT,
      `snapshot source_volume_id is required`
    );
  }

  if (!name) {
    throw new GrpcError(
      grpc.status.INVALID_ARGUMENT,
      `snapshot name is required`
    );
  }

  driver.ctx.logger.verbose("requested snapshot name: %s", name);

  let invalid_chars;
  invalid_chars = name.match(/[^a-z0-9_\-:.+]+/gi);
  if (invalid_chars) {
    // report each offending character only once
    invalid_chars = String.prototype.concat(
      ...new Set(invalid_chars.join(""))
    );
    throw new GrpcError(
      grpc.status.INVALID_ARGUMENT,
      `snapshot name contains invalid characters: ${invalid_chars}`
    );
  }

  // create snapshot here
  let iscsiName = driver.buildIscsiName(source_volume_id);
  let lun = await httpClient.GetLunByName(iscsiName);

  if (!lun) {
    throw new GrpcError(
      grpc.status.INVALID_ARGUMENT,
      `invalid source_volume_id: ${source_volume_id}`
    );
  }

  // check for already exists
  let snapshot = await httpClient.GetSnapshotByLunIDAndName(lun.lun_id, name);
  if (snapshot) {
    return {
      snapshot: {
        /**
         * The purpose of this field is to give CO guidance on how much space
         * is needed to create a volume from this snapshot.
         */
        size_bytes: 0,
        snapshot_id: `/lun/${lun.lun_id}/${snapshot.uuid}`, // encodes lun id + snapshot uuid //fixme
        source_volume_id: source_volume_id,
        //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
        creation_time: {
          seconds: snapshot.time,
          nanos: 0,
        },
        ready_to_use: true,
      },
    };
  }

  let data = Object.assign({}, driver.options.iscsi.lunSnapshotTemplate, {
    src_lun_uuid: lun.uuid,
    taken_by: "democratic-csi",
    description: name, //check
  });

  let response = await httpClient.CreateSnapshot(data);

  return {
    snapshot: {
      /**
       * The purpose of this field is to give CO guidance on how much space
       * is needed to create a volume from this snapshot.
       */
      size_bytes: 0,
      snapshot_id: `/lun/${lun.lun_id}/${response.body.data.snapshot_uuid}`,
      source_volume_id: source_volume_id,
      //https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
      creation_time: {
        seconds: Math.round(new Date().getTime() / 1000),
        nanos: 0,
      },
      ready_to_use: true,
    },
  };
}
/**
* In addition, if clones have been created from a snapshot, then they must
* be destroyed before the snapshot can be destroyed.
*
* @param {*} call
*/
async DeleteSnapshot(call) {
// throw new GrpcError(
// grpc.status.UNIMPLEMENTED,
// `operation not supported by driver`
// );
const driver = this;
const httpClient = await driver.getHttpClient();
const snapshot_id = call.request.snapshot_id;
if (!snapshot_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`snapshot_id is required`
);
}
let parts = snapshot_id.split("/");
let lun_id = parts[2];
let snapshot_uuid = parts[3];
// TODO: delete snapshot
let snapshot = await httpClient.GetSnapshotByLunIDAndSnapshotUUID(
lun_id,
snapshot_uuid
);
if (snapshot) {
await httpClient.DeleteSnapshot(snapshot.uuid);
}
return {};
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ControllerSynologyDriver = ControllerSynologyDriver;

View File

@ -47,8 +47,9 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver {
]
) {
await zb.zfs.set(datasetName, {
[key]: this.options.nfs.shareStrategySetDatasetProperties
.properties[key],
[key]:
this.options.nfs.shareStrategySetDatasetProperties
.properties[key],
});
}
}
@ -114,8 +115,10 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver {
if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) {
for (const attributeName in this.options.iscsi
.shareStrategyTargetCli.tpg.attributes) {
const attributeValue = this.options.iscsi
.shareStrategyTargetCli.tpg.attributes[attributeName];
const attributeValue =
this.options.iscsi.shareStrategyTargetCli.tpg.attributes[
attributeName
];
setAttributesText += "\n";
setAttributesText += `set attribute ${attributeName}=${attributeValue}`;
}
@ -124,8 +127,10 @@ class ControllerZfsGenericDriver extends ControllerZfsSshBaseDriver {
if (this.options.iscsi.shareStrategyTargetCli.tpg.auth) {
for (const attributeName in this.options.iscsi
.shareStrategyTargetCli.tpg.auth) {
const attributeValue = this.options.iscsi
.shareStrategyTargetCli.tpg.auth[attributeName];
const attributeValue =
this.options.iscsi.shareStrategyTargetCli.tpg.auth[
attributeName
];
setAttributesText += "\n";
setAttributesText += `set auth ${attributeName}=${attributeValue}`;
}
@ -168,9 +173,11 @@ create /backstores/block/${iscsiName}
volume_context = {
node_attach_driver: "iscsi",
portal: this.options.iscsi.targetPortal,
portals: this.options.iscsi.targetPortals.join(","),
interface: this.options.iscsi.interface,
portal: this.options.iscsi.targetPortal || "",
portals: this.options.iscsi.targetPortals
? this.options.iscsi.targetPortals.join(",")
: "",
interface: this.options.iscsi.interface || "",
iqn: iqn,
lun: 0,
};

View File

@ -90,9 +90,20 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
"CLONE_VOLUME",
//"PUBLISH_READONLY",
"EXPAND_VOLUME",
//"VOLUME_CONDITION", // added in v1.3.0
//"GET_VOLUME", // added in v1.3.0
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc.push(
//"VOLUME_CONDITION",
"GET_VOLUME"
);
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc.push(
"SINGLE_NODE_MULTI_WRITER"
);
}
}
if (!("rpc" in options.service.node.capabilities)) {
@ -118,6 +129,18 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
];
break;
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP"); // in k8s is sent in as the security context fsgroup
}
}
}
@ -219,6 +242,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
@ -247,6 +272,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
@ -1492,7 +1519,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
let types = [];
const volumeParentDatasetName = this.getVolumeParentDatasetName();
const snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
const snapshotParentDatasetName =
this.getDetachedSnapshotParentDatasetName();
// get data from cache and return immediately
if (starting_token) {
@ -1618,7 +1646,7 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
}
throw new GrpcError(grpc.status.NOT_FOUND, message);
}
throw new GrpcError(grpc.status.FAILED_PRECONDITION, e.toString());
throw new GrpcError(grpc.status.FAILED_PRECONDITION, err.toString());
}
response.indexed.forEach((row) => {
@ -1771,9 +1799,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
const datasetName = datasetParentName + "/" + source_volume_id;
snapshotProperties[SNAPSHOT_CSI_NAME_PROPERTY_NAME] = name;
snapshotProperties[
SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME
] = source_volume_id;
snapshotProperties[SNAPSHOT_CSI_SOURCE_VOLUME_ID_PROPERTY_NAME] =
source_volume_id;
snapshotProperties[MANAGED_PROPERTY_NAME] = "true";
driver.ctx.logger.verbose("requested snapshot name: %s", name);
@ -1995,9 +2022,8 @@ class ControllerZfsSshBaseDriver extends CsiBaseDriver {
// cleanup parent dataset if possible
if (detachedSnapshot) {
let containerDataset = zb.helpers.extractParentDatasetName(
fullSnapshotName
);
let containerDataset =
zb.helpers.extractParentDatasetName(fullSnapshotName);
try {
await this.removeSnapshotsFromDatatset(containerDataset);
await zb.zfs.destroy(containerDataset);

View File

@ -1,10 +1,14 @@
const { FreeNASDriver } = require("./freenas");
const { FreeNASSshDriver } = require("./freenas/ssh");
const { FreeNASApiDriver } = require("./freenas/api");
const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");
const {
ZfsLocalEphemeralInlineDriver,
} = require("./zfs-local-ephemeral-inline");
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
const { ControllerSmbClientDriver } = require("./controller-smb-client");
const { ControllerLustreClientDriver } = require("./controller-lustre-client");
const { ControllerSynologyDriver } = require("./controller-synology");
const { NodeManualDriver } = require("./node-manual");
function factory(ctx, options) {
@ -15,14 +19,26 @@ function factory(ctx, options) {
case "truenas-nfs":
case "truenas-smb":
case "truenas-iscsi":
return new FreeNASDriver(ctx, options);
return new FreeNASSshDriver(ctx, options);
case "freenas-api-iscsi":
case "freenas-api-nfs":
case "freenas-api-smb":
return new FreeNASApiDriver(ctx, options);
case "synology-nfs":
case "synology-smb":
case "synology-iscsi":
return new ControllerSynologyDriver(ctx, options);
case "zfs-generic-nfs":
case "zfs-generic-iscsi":
return new ControllerZfsGenericDriver(ctx, options);
case "zfs-local-ephemeral-inline":
return new ZfsLocalEphemeralInlineDriver(ctx, options);
case "smb-client":
return new ControllerSmbClientDriver(ctx, options);
case "nfs-client":
return new ControllerNfsClientDriver(ctx, options);
case "lustre-client":
return new ControllerLustreClientDriver(ctx, options);
case "node-manual":
return new NodeManualDriver(ctx, options);
default:

3840
src/driver/freenas/api.js Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,771 @@
const { sleep } = require("../../../utils/general");
const { Zetabyte } = require("../../../utils/zfs");
// used for in-memory cache of the version info
const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
class Api {
/**
 * @param {*} client http client used for all api requests
 * @param {*} cache cache used to memoize system version lookups
 * @param {object} options additional api options
 */
constructor(client, cache, options = {}) {
  this.client = client;
  this.cache = cache;
  this.options = options;
}
// return the http client handed to the constructor; async for interface
// parity with drivers that build the client lazily
async getHttpClient() {
  return this.client;
}
/**
 * only here for the helpers
 *
 * Returns a Zetabyte instance whose executor refuses to spawn: zfs commands
 * must go through the http api, but the zb helper/parsing utilities remain
 * useful.
 *
 * @returns {Zetabyte}
 */
async getZetabyte() {
  return new Zetabyte({
    executor: {
      spawn: function () {
        throw new Error(
          "cannot use the zb implementation to execute zfs commands, must use the http api"
        );
      },
    },
  });
}
async findResourceByProperties(endpoint, match) {
if (!match) {
return;
}
if (typeof match === "object" && Object.keys(match).length < 1) {
return;
}
const httpClient = await this.getHttpClient();
let target;
let page = 0;
let lastReponse;
// loop and find target
let queryParams = {};
queryParams.limit = 100;
queryParams.offset = 0;
while (!target) {
//Content-Range: items 0-2/3 (full set)
//Content-Range: items 0--1/3 (invalid offset)
if (queryParams.hasOwnProperty("offset")) {
queryParams.offset = queryParams.limit * page;
}
// crude stoppage attempt
let response = await httpClient.get(endpoint, queryParams);
if (lastReponse) {
if (JSON.stringify(lastReponse) == JSON.stringify(response)) {
break;
}
}
lastReponse = response;
if (response.statusCode == 200) {
if (response.body.length < 1) {
break;
}
response.body.some((i) => {
let isMatch = true;
if (typeof match === "function") {
isMatch = match(i);
} else {
for (let property in match) {
if (match[property] != i[property]) {
isMatch = false;
break;
}
}
}
if (isMatch) {
target = i;
return true;
}
return false;
});
} else {
throw new Error(
"FreeNAS http error - code: " +
response.statusCode +
" body: " +
JSON.stringify(response.body)
);
}
page++;
}
return target;
}
async getApiVersion() {
const systemVersion = await this.getSystemVersion();
if (systemVersion.v2) {
if ((await this.getSystemVersionMajorMinor()) == 11.2) {
return 1;
}
return 2;
}
return 1;
}
async getIsFreeNAS() {
const systemVersion = await this.getSystemVersion();
let version;
if (systemVersion.v2) {
version = systemVersion.v2;
} else {
version = systemVersion.v1.fullversion;
}
if (version.toLowerCase().includes("freenas")) {
return true;
}
return false;
}
async getIsTrueNAS() {
const systemVersion = await this.getSystemVersion();
let version;
if (systemVersion.v2) {
version = systemVersion.v2;
} else {
version = systemVersion.v1.fullversion;
}
if (version.toLowerCase().includes("truenas")) {
return true;
}
return false;
}
async getIsScale() {
const systemVersion = await this.getSystemVersion();
if (systemVersion.v2 && systemVersion.v2.toLowerCase().includes("scale")) {
return true;
}
return false;
}
/**
 * Derive a "major.minor" string from whatever version info the system
 * reported (v2 string preferred, v1 fullversion otherwise).
 *
 * @returns {string|undefined} e.g. "11.2"; undefined when no version string is available
 */
async getSystemVersionMajorMinor() {
  const systemVersion = await this.getSystemVersion();
  let parts;
  let parts_i;
  let version;

  /*
  systemVersion.v2 = "FreeNAS-11.2-U5";
  systemVersion.v2 = "TrueNAS-SCALE-20.11-MASTER-20201127-092915";
  systemVersion.v1 = {
    fullversion: "FreeNAS-9.3-STABLE-201503200528",
    fullversion: "FreeNAS-11.2-U5 (c129415c52)",
  };

  systemVersion.v2 = null;
  */

  if (systemVersion.v2) {
    version = systemVersion.v2;
  } else {
    version = systemVersion.v1.fullversion;
  }

  if (version) {
    parts = version.split("-");
    parts_i = [];
    parts.forEach((value) => {
      // keep only digits and dots (e.g. "11.2" from "11.2", "" from "STABLE")
      let i = value.replace(/[^\d.]/g, "");
      if (i.length > 0) {
        parts_i.push(i);
      }
    });

    // join and resplit to deal with single elements which contain a decimal
    parts_i = parts_i.join(".").split(".");
    parts_i.splice(2);
    return parts_i.join(".");
  }
  // NOTE(review): implicitly returns undefined when version is falsy
}
async getSystemVersionMajor() {
const majorMinor = await this.getSystemVersionMajorMinor();
return majorMinor.split(".")[0];
}
// memoize version info for 60 seconds to avoid re-querying the endpoint
async setVersionInfoCache(versionInfo) {
  await this.cache.set(
    FREENAS_SYSTEM_VERSION_CACHE_KEY,
    versionInfo,
    60 * 1000
  );
}
/**
 * Determine the system version, trying the v2 api first and falling back to
 * the v1 api. Successful results are cached (60s); failure of both
 * endpoints raises with the collected errors/responses.
 *
 * @returns {object} { v2: string } and/or { v1: object }
 */
async getSystemVersion() {
  let cacheData = await this.cache.get(FREENAS_SYSTEM_VERSION_CACHE_KEY);

  if (cacheData) {
    return cacheData;
  }

  const httpClient = await this.getHttpClient(false);
  const endpoint = "/system/version/";
  let response;
  // remember the caller's api version so it can be restored afterwards
  const startApiVersion = httpClient.getApiVersion();
  const versionInfo = {};
  const versionErrors = {};
  const versionResponses = {};

  httpClient.setApiVersion(2);
  /**
   * FreeNAS-11.2-U5
   * TrueNAS-12.0-RELEASE
   * TrueNAS-SCALE-20.11-MASTER-20201127-092915
   */
  try {
    response = await httpClient.get(endpoint);
    versionResponses.v2 = response;
    if (response.statusCode == 200) {
      versionInfo.v2 = response.body;

      // return immediately to save on resources and silly requests
      await this.setVersionInfoCache(versionInfo);

      // reset apiVersion
      httpClient.setApiVersion(startApiVersion);

      return versionInfo;
    }
  } catch (e) {
    // if more info is needed use e.stack
    versionErrors.v2 = e.toString();
  }

  httpClient.setApiVersion(1);
  /**
   * {"fullversion": "FreeNAS-9.3-STABLE-201503200528", "name": "FreeNAS", "version": "9.3"}
   * {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
   */
  try {
    response = await httpClient.get(endpoint);
    versionResponses.v1 = response;
    // NOTE(review): IsJsonString is not defined by this file's visible
    // requires — verify it exists at module scope, otherwise this throws
    if (response.statusCode == 200 && IsJsonString(response.body)) {
      versionInfo.v1 = response.body;
      await this.setVersionInfoCache(versionInfo);

      // reset apiVersion
      httpClient.setApiVersion(startApiVersion);

      return versionInfo;
    }
  } catch (e) {
    // if more info is needed use e.stack
    versionErrors.v1 = e.toString();
  }

  // throw error if cannot get v1 or v2 data
  // likely bad creds/url
  // NOTE(review): GrpcError/grpc are not among this file's visible requires —
  // confirm they are imported, otherwise this raises a ReferenceError instead
  throw new GrpcError(
    grpc.status.UNKNOWN,
    `FreeNAS error getting system version info: ${JSON.stringify({
      errors: versionErrors,
      responses: versionResponses,
    })}`
  );
}
getIsUserProperty(property) {
if (property.includes(":")) {
return true;
}
return false;
}
getUserProperties(properties) {
let user_properties = {};
for (const property in properties) {
if (this.getIsUserProperty(property)) {
user_properties[property] = properties[property];
}
}
return user_properties;
}
getSystemProperties(properties) {
let system_properties = {};
for (const property in properties) {
if (!this.getIsUserProperty(property)) {
system_properties[property] = properties[property];
}
}
return system_properties;
}
getPropertiesKeyValueArray(properties) {
let arr = [];
for (const property in properties) {
arr.push({ key: property, value: properties[property] });
}
return arr;
}
/**
 * Produce a zfs-get-like structure ({ value, rawvalue, source }) for each
 * requested property, looking at the dataset root, then .properties, then
 * .user_properties; missing properties get "-" placeholders.
 *
 * @param {object} dataset api response object for a dataset
 * @param {array} properties property names to extract
 * @returns {object} map of property name -> { value, rawvalue, source }
 */
normalizeProperties(dataset, properties) {
  let res = {};
  for (const property of properties) {
    let p;
    if (dataset.hasOwnProperty(property)) {
      p = dataset[property];
    } else if (
      dataset.properties &&
      dataset.properties.hasOwnProperty(property)
    ) {
      p = dataset.properties[property];
    } else if (
      dataset.user_properties &&
      dataset.user_properties.hasOwnProperty(property)
    ) {
      p = dataset.user_properties[property];
    } else {
      // not present anywhere: placeholder mirroring `zfs get` output
      p = {
        value: "-",
        rawvalue: "-",
        source: "-",
      };
    }

    if (typeof p === "object" && p !== null) {
      // nothing, leave as is
    } else {
      // wrap scalar values into the canonical shape
      p = {
        value: p,
        rawvalue: p,
        source: "-",
      };
    }
    res[property] = p;
  }

  return res;
}
/**
 * Create a dataset; an "already exists" (422) response is treated as
 * success, making the call idempotent.
 *
 * @param {string} datasetName
 * @param {object} data additional creation properties (mutated: name is set)
 * @throws {Error} on any other non-200 response
 */
async DatasetCreate(datasetName, data) {
  const httpClient = await this.getHttpClient(false);
  let response;
  let endpoint;

  data.name = datasetName;

  endpoint = "/pool/dataset";
  response = await httpClient.post(endpoint, data);

  if (response.statusCode == 200) {
    return;
  }

  // idempotency: existing dataset is not an error
  if (
    response.statusCode == 422 &&
    JSON.stringify(response.body).includes("already exists")
  ) {
    return;
  }

  throw new Error(JSON.stringify(response.body));
}
/**
 * Delete a dataset; a "does not exist" (422) response is treated as
 * success, making the call idempotent.
 *
 * @param {*} datasetName
 * @param {*} data delete options passed through to the api
 * @returns
 * @throws {Error} on any other non-200 response
 */
async DatasetDelete(datasetName, data) {
  const httpClient = await this.getHttpClient(false);
  let response;
  let endpoint;

  endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
  response = await httpClient.delete(endpoint, data);

  if (response.statusCode == 200) {
    return;
  }

  // idempotency: missing dataset is not an error
  if (
    response.statusCode == 422 &&
    JSON.stringify(response.body).includes("does not exist")
  ) {
    return;
  }

  throw new Error(JSON.stringify(response.body));
}
async DatasetSet(datasetName, properties) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.put(endpoint, {
...this.getSystemProperties(properties),
user_properties_update: this.getPropertiesKeyValueArray(
this.getUserProperties(properties)
),
});
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
async DatasetInherit(datasetName, property) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
let system_properties = {};
let user_properties_update = [];
const isUserProperty = this.getIsUserProperty(property);
if (isUserProperty) {
user_properties_update = [{ key: property, remove: true }];
} else {
system_properties[property] = "INHERIT";
}
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.put(endpoint, {
...system_properties,
user_properties_update,
});
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
/**
*
* zfs get -Hp all tank/k8s/test/PVC-111
*
* @param {*} datasetName
* @param {*} properties
* @returns
*/
async DatasetGet(datasetName, properties) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/pool/dataset/id/${encodeURIComponent(datasetName)}`;
response = await httpClient.get(endpoint);
if (response.statusCode == 200) {
return this.normalizeProperties(response.body, properties);
}
if (response.statusCode == 404) {
throw new Error("dataset does not exist");
}
throw new Error(JSON.stringify(response.body));
}
/**
 * Destroy snapshots of a dataset via POST /pool/dataset/destroy_snapshots.
 * See https://github.com/truenas/middleware/pull/6934
 *
 * @param {string} datasetName dataset whose snapshots should be destroyed
 * @param {object} data additional request body properties
 * @returns {Promise<*>} response body on success, undefined on tolerated 422
 * @throws {Error} with the serialized response body on any other failure
 */
async DatasetDestroySnapshots(datasetName, data = {}) {
  const httpClient = await this.getHttpClient(false);
  let response;
  let endpoint;
  // NOTE: mutates the caller-supplied data object
  data.name = datasetName;
  endpoint = "/pool/dataset/destroy_snapshots";
  response = await httpClient.post(endpoint, data);
  if (response.statusCode == 200) {
    return response.body;
  }
  // NOTE(review): tolerating "already exists" looks copy-pasted from the
  // create methods; a destroy call would more plausibly need to tolerate a
  // "does not exist"-style message -- confirm against middleware behavior
  if (
    response.statusCode == 422 &&
    JSON.stringify(response.body).includes("already exists")
  ) {
    return;
  }
  throw new Error(JSON.stringify(response.body));
}
async SnapshotSet(snapshotName, properties) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`;
response = await httpClient.put(endpoint, {
//...this.getSystemProperties(properties),
user_properties_update: this.getPropertiesKeyValueArray(
this.getUserProperties(properties)
),
});
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
/**
 * Fetch a snapshot and normalize the requested properties.
 *
 * Roughly equivalent to: zfs get -Hp all tank/k8s/test/PVC-111@snap
 *
 * @param {string} snapshotName full snapshot name (dataset@snapshot)
 * @param {string[]} properties property names to normalize
 * @returns {Promise<object>} normalized property map
 * @throws {Error} on 404 or with the serialized response body otherwise
 */
async SnapshotGet(snapshotName, properties) {
  const httpClient = await this.getHttpClient(false);
  let response;
  let endpoint;
  endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`;
  response = await httpClient.get(endpoint);
  if (response.statusCode == 200) {
    return this.normalizeProperties(response.body, properties);
  }
  if (response.statusCode == 404) {
    // NOTE(review): message says "dataset" but this is a snapshot lookup;
    // left untouched in case callers match on this exact string -- confirm
    throw new Error("dataset does not exist");
  }
  throw new Error(JSON.stringify(response.body));
}
async SnapshotCreate(snapshotName, data = {}) {
const httpClient = await this.getHttpClient(false);
const zb = await this.getZetabyte();
let response;
let endpoint;
const dataset = zb.helpers.extractDatasetName(snapshotName);
const snapshot = zb.helpers.extractSnapshotName(snapshotName);
data.dataset = dataset;
data.name = snapshot;
endpoint = "/zfs/snapshot";
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("already exists")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
async SnapshotDelete(snapshotName, data = {}) {
const httpClient = await this.getHttpClient(false);
const zb = await this.getZetabyte();
let response;
let endpoint;
endpoint = `/zfs/snapshot/id/${encodeURIComponent(snapshotName)}`;
response = await httpClient.delete(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (response.statusCode == 404) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("not found")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
async CloneCreate(snapshotName, datasetName, data = {}) {
const httpClient = await this.getHttpClient(false);
const zb = await this.getZetabyte();
let response;
let endpoint;
data.snapshot = snapshotName;
data.dataset_dst = datasetName;
endpoint = "/zfs/snapshot/clone";
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
if (
response.statusCode == 422 &&
JSON.stringify(response.body).includes("already exists")
) {
return;
}
throw new Error(JSON.stringify(response.body));
}
// get all dataset snapshots
// https://github.com/truenas/middleware/pull/6934
// then use core.bulk to delete all
/**
*
* /usr/lib/python3/dist-packages/middlewared/plugins/replication.py
* readonly enum=["SET", "REQUIRE", "IGNORE"]
*
* @param {*} data
* @returns
*/
async ReplicationRunOnetime(data) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = "/replication/run_onetime";
response = await httpClient.post(endpoint, data);
// 200 means the 'job' was accepted only
// must continue to check the status of the job to know when it has finished and if it was successful
// /core/get_jobs [["id", "=", jobidhere]]
if (response.statusCode == 200) {
return response.body;
}
throw new Error(JSON.stringify(response.body));
}
async CoreWaitForJob(job_id, timeout = 0) {
if (!job_id) {
throw new Error("invalid job_id");
}
const startTime = Date.now() / 1000;
let currentTime;
let job;
// wait for job to finish
while (!job || !["SUCCESS", "ABORTED", "FAILED"].includes(job.state)) {
job = await this.CoreGetJobs({ id: job_id });
job = job[0];
await sleep(3000);
currentTime = Date.now() / 1000;
if (timeout > 0 && currentTime > startTime + timeout) {
throw new Error("timeout waiting for job to complete");
}
}
return job;
}
async CoreGetJobs(data) {
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = "/core/get_jobs";
response = await httpClient.get(endpoint, data);
// 200 means the 'job' was accepted only
// must continue to check the status of the job to know when it has finished and if it was successful
// /core/get_jobs [["id", "=", jobidhere]]
// state = SUCCESS/ABORTED/FAILED means finality has been reached
// state = RUNNING
if (response.statusCode == 200) {
return response.body;
}
throw new Error(JSON.stringify(response.body));
}
/**
*
* @param {*} data
*/
async FilesystemSetperm(data) {
/*
{
"path": "string",
"mode": "string",
"uid": 0,
"gid": 0,
"options": {
"stripacl": false,
"recursive": false,
"traverse": false
}
}
*/
const httpClient = await this.getHttpClient(false);
let response;
let endpoint;
endpoint = `/filesystem/setperm`;
response = await httpClient.post(endpoint, data);
if (response.statusCode == 200) {
return;
}
throw new Error(JSON.stringify(response.body));
}
}
/**
 * Determine whether a string is parseable as JSON.
 *
 * @param {string} str candidate JSON text
 * @returns {boolean} true when JSON.parse accepts the input
 */
function IsJsonString(str) {
  try {
    JSON.parse(str);
    return true;
  } catch (e) {
    return false;
  }
}
module.exports.Api = Api;

View File

@ -18,7 +18,7 @@ const FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME =
// used for in-memory cache of the version info
const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
class FreeNASDriver extends ControllerZfsSshBaseDriver {
class FreeNASSshDriver extends ControllerZfsSshBaseDriver {
/**
* cannot make this a storage class parameter as storage class/etc context is *not* sent
* into various calls such as GetControllerCapabilities etc
@ -201,9 +201,8 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver {
share = {
nfs_paths: [properties.mountpoint.value],
nfs_comment: `democratic-csi (${this.ctx.args.csiName}): ${datasetName}`,
nfs_network: this.options.nfs.shareAllowedNetworks.join(
","
),
nfs_network:
this.options.nfs.shareAllowedNetworks.join(","),
nfs_hosts: this.options.nfs.shareAllowedHosts.join(","),
nfs_alldirs: this.options.nfs.shareAlldirs,
nfs_ro: false,
@ -633,11 +632,10 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver {
? this.options.iscsi.extentBlocksize
: 512;
const extentDisablePhysicalBlocksize = this.options.iscsi.hasOwnProperty(
"extentDisablePhysicalBlocksize"
)
? this.options.iscsi.extentDisablePhysicalBlocksize
: true;
const extentDisablePhysicalBlocksize =
this.options.iscsi.hasOwnProperty("extentDisablePhysicalBlocksize")
? this.options.iscsi.extentDisablePhysicalBlocksize
: true;
const extentRpm = this.options.iscsi.hasOwnProperty("extentRpm")
? this.options.iscsi.extentRpm
@ -1232,27 +1230,13 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver {
[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName,
});
// iscsiadm -m discovery -t st -p 172.21.26.81
// iscsiadm -m node -T iqn.2011-03.lan.bitness.istgt:test -p bitness.lan -l
// FROM driver config? no, node attachment should have everything required to remain independent
// portal
// portals
// interface
// chap discovery
// chap session
// FROM context
// iqn
// lun
volume_context = {
node_attach_driver: "iscsi",
portal: this.options.iscsi.targetPortal,
portals: this.options.iscsi.targetPortals.join(","),
portal: this.options.iscsi.targetPortal || "",
portals: this.options.iscsi.targetPortals
? this.options.iscsi.targetPortals.join(",")
: "",
interface: this.options.iscsi.interface || "",
//chapDiscoveryEnabled: this.options.iscsi.chapDiscoveryEnabled,
//chapSessionEnabled: this.options.iscsi.chapSessionEnabled,
iqn: iqn,
lun: 0,
};
@ -1619,6 +1603,7 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver {
async expandVolume(call, datasetName) {
const driverShareType = this.getDriverShareType();
const sshClient = this.getSshClient();
const zb = await this.getZetabyte();
switch (driverShareType) {
case "iscsi":
@ -1626,7 +1611,29 @@ class FreeNASDriver extends ControllerZfsSshBaseDriver {
let command;
let reload = false;
if (isScale) {
command = sshClient.buildCommand("systemctl", ["reload", "scst"]);
let properties;
properties = await zb.zfs.get(datasetName, [
FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME,
]);
properties = properties[datasetName];
this.ctx.logger.debug("zfs props data: %j", properties);
let iscsiName =
properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
/**
* command = sshClient.buildCommand("systemctl", ["reload", "scst"]);
* does not help ^
*
* echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size
* works ^
*
* scstadmin -resync_dev ${iscsiName}
* works but always give a exit code of 1 ^
*/
command = sshClient.buildCommand("sh", [
"-c",
`echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size`,
]);
reload = true;
} else {
command = sshClient.buildCommand("/etc/rc.d/ctld", ["reload"]);
@ -1853,4 +1860,4 @@ function IsJsonString(str) {
return true;
}
module.exports.FreeNASDriver = FreeNASDriver;
module.exports.FreeNASSshDriver = FreeNASSshDriver;

View File

@ -1,3 +1,4 @@
const _ = require("lodash");
const os = require("os");
const fs = require("fs");
const { GrpcError, grpc } = require("../utils/grpc");
@ -17,7 +18,23 @@ const sleep = require("../utils/general").sleep;
class CsiBaseDriver {
constructor(ctx, options) {
this.ctx = ctx;
this.options = options;
this.options = options || {};
if (!this.options.hasOwnProperty("node")) {
this.options.node = {};
}
if (!this.options.node.hasOwnProperty("format")) {
this.options.node.format = {};
}
if (!this.options.node.hasOwnProperty("mount")) {
this.options.node.mount = {};
}
if (!this.options.node.mount.hasOwnProperty("checkFilesystem")) {
this.options.node.mount.checkFilesystem = {};
}
}
/**
@ -269,6 +286,7 @@ class CsiBaseDriver {
const volume_context = call.request.volume_context;
let fs_type;
let mount_flags;
let volume_mount_group;
const node_attach_driver = volume_context.node_attach_driver;
const block_path = staging_target_path + "/block_device";
const bind_mount_flags = [];
@ -280,6 +298,14 @@ class CsiBaseDriver {
call.request.volume_context.provisioner_driver_instance_id
);
/*
let mount_options = await mount.getMountOptions(staging_target_path);
console.log(mount_options);
console.log(await mount.getMountOptionValue(mount_options, "stripe"));
console.log(await mount.getMountOptionPresent(mount_options, "stripee"));
throw new Error("foobar");
*/
if (access_type == "mount") {
fs_type = capability.mount.fs_type;
mount_flags = capability.mount.mount_flags || [];
@ -288,6 +314,19 @@ class CsiBaseDriver {
mount_flags.push(normalizedSecrets.mount_flags);
}
mount_flags.push("defaults");
// https://github.com/karelzak/util-linux/issues/1429
//mount_flags.push("x-democratic-csi.managed");
//mount_flags.push("x-democratic-csi.staged");
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.5.0") &&
driver.options.service.node.capabilities.rpc.includes(
"VOLUME_MOUNT_GROUP"
)
) {
volume_mount_group = capability.mount.volume_mount_group; // in k8s this is derived from the fsgroup in the pod security context
}
}
if (call.request.volume_context.provisioner_driver == "node-manual") {
@ -316,6 +355,7 @@ class CsiBaseDriver {
switch (node_attach_driver) {
case "nfs":
case "lustre":
device = `${volume_context.server}:${volume_context.share}`;
break;
case "smb":
@ -345,9 +385,27 @@ class CsiBaseDriver {
// ensure unique entries only
portals = [...new Set(portals)];
// stores actual device paths after iscsi login
let iscsiDevices = [];
// stores configuration of targets/iqn/luns to connect to
let iscsiConnections = [];
for (let portal of portals) {
iscsiConnections.push({
portal,
iqn: volume_context.iqn,
lun: volume_context.lun,
});
}
/**
* TODO: allow sending in iscsiConnection in a raw/manual format
* TODO: allow option to determine if send_targets should be invoked
* TODO: allow option to control whether nodedb entry should be created by driver
* TODO: allow option to control whether nodedb entry should be deleted by driver
*/
for (let iscsiConnection of iscsiConnections) {
// create DB entry
// https://library.netapp.com/ecmdocs/ECMP1654943/html/GUID-8EC685B4-8CB6-40D8-A8D5-031A3899BCDC.html
// put these options in place to force targets managed by csi to be explicitly attached (in the case of unclean shutdown etc)
@ -363,24 +421,27 @@ class CsiBaseDriver {
}
}
await iscsi.iscsiadm.createNodeDBEntry(
volume_context.iqn,
portal,
iscsiConnection.iqn,
iscsiConnection.portal,
nodeDB
);
// login
await iscsi.iscsiadm.login(volume_context.iqn, portal);
await iscsi.iscsiadm.login(
iscsiConnection.iqn,
iscsiConnection.portal
);
// get associated session
let session = await iscsi.iscsiadm.getSession(
volume_context.iqn,
portal
iscsiConnection.iqn,
iscsiConnection.portal
);
// rescan in scenarios when login previously occurred but volumes never appeared
await iscsi.iscsiadm.rescanSession(session);
// find device name
device = `/dev/disk/by-path/ip-${portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`;
device = `/dev/disk/by-path/ip-${iscsiConnection.portal}-iscsi-${iscsiConnection.iqn}-lun-${iscsiConnection.lun}`;
let deviceByPath = device;
// can take some time for device to show up, loop for some period
@ -411,7 +472,7 @@ class CsiBaseDriver {
iscsiDevices.push(device);
driver.ctx.logger.info(
`successfully logged into portal ${portal} and created device ${deviceByPath} with realpath ${device}`
`successfully logged into portal ${iscsiConnection.portal} and created device ${deviceByPath} with realpath ${device}`
);
}
}
@ -433,7 +494,7 @@ class CsiBaseDriver {
);
}
if (iscsiDevices.length != portals.length) {
if (iscsiDevices.length != iscsiConnections.length) {
driver.ctx.logger.warn(
`failed to attach all iscsi devices/targets/portals`
);
@ -450,12 +511,14 @@ class CsiBaseDriver {
// compare all device-mapper slaves with the newly created devices
// if any of the new devices are device-mapper slaves treat this as a
// multipath scenario
let allDeviceMapperSlaves = await filesystem.getAllDeviceMapperSlaveDevices();
let allDeviceMapperSlaves =
await filesystem.getAllDeviceMapperSlaveDevices();
let commonDevices = allDeviceMapperSlaves.filter((value) =>
iscsiDevices.includes(value)
);
const useMultipath = portals.length > 1 || commonDevices.length > 0;
const useMultipath =
iscsiConnections.length > 1 || commonDevices.length > 0;
// discover multipath device to use
if (useMultipath) {
@ -488,7 +551,15 @@ class CsiBaseDriver {
// format
result = await filesystem.deviceIsFormatted(device);
if (!result) {
await filesystem.formatDevice(device, fs_type);
let formatOptions = _.get(
driver.options.node.format,
[fs_type, "customOptions"],
[]
);
if (!Array.isArray(formatOptions)) {
formatOptions = [];
}
await filesystem.formatDevice(device, fs_type, formatOptions);
}
let fs_info = await filesystem.getDeviceFilesystemInfo(device);
@ -500,9 +571,17 @@ class CsiBaseDriver {
staging_target_path
);
if (!result) {
// TODO: add a parameter to control this behavior
// https://github.com/democratic-csi/democratic-csi/issues/52#issuecomment-768463401
//await filesystem.checkFilesystem(device, fs_type);
let checkFilesystem =
driver.options.node.mount.checkFilesystem[fs_type] || {};
if (checkFilesystem.enabled) {
await filesystem.checkFilesystem(
device,
fs_type,
checkFilesystem.customOptions || [],
checkFilesystem.customFilesystemOptions || []
);
}
}
}
break;
@ -526,7 +605,33 @@ class CsiBaseDriver {
case "ext3":
case "ext4dev":
//await filesystem.checkFilesystem(device, fs_info.type);
await filesystem.expandFilesystem(device, fs_type);
try {
await filesystem.expandFilesystem(device, fs_type);
} catch (err) {
// mount is clean and rw, but it will not expand until clean umount has been done
// failed to execute filesystem command: resize2fs /dev/sda, response: {"code":1,"stdout":"Couldn't find valid filesystem superblock.\n","stderr":"resize2fs 1.44.5 (15-Dec-2018)\nresize2fs: Superblock checksum does not match superblock while trying to open /dev/sda\n"}
// /dev/sda on /var/lib/kubelet/plugins/kubernetes.io/csi/pv/pvc-4a80757e-5e87-475d-826f-44fcc4719348/globalmount type ext4 (rw,relatime,stripe=256)
if (
err.code == 1 &&
err.stdout.includes("find valid filesystem superblock") &&
err.stderr.includes("checksum does not match superblock")
) {
driver.ctx.logger.warn(
`successful mount, unsuccessful fs resize: attempting abnormal umount/mount/resize2fs to clear things up ${staging_target_path} (${device})`
);
// try an unmount/mount/fsck cycle again just to clean things up
await mount.umount(staging_target_path, []);
await mount.mount(
device,
staging_target_path,
["-t", fs_type].concat(["-o", mount_flags.join(",")])
);
await filesystem.expandFilesystem(device, fs_type);
} else {
throw err;
}
}
break;
case "xfs":
//await filesystem.checkFilesystem(device, fs_info.type);
@ -581,6 +686,7 @@ class CsiBaseDriver {
* @param {*} call
*/
async NodeUnstageVolume(call) {
const driver = this;
const mount = new Mount();
const filesystem = new Filesystem();
const iscsi = new ISCSI();
@ -594,7 +700,8 @@ class CsiBaseDriver {
const staging_target_path = call.request.staging_target_path;
const block_path = staging_target_path + "/block_device";
let normalized_staging_path = staging_target_path;
const umount_args = []; // --force
const umount_args = [];
const umount_force_extra_args = ["--force", "--lazy"];
if (!staging_target_path) {
throw new GrpcError(
@ -606,7 +713,30 @@ class CsiBaseDriver {
//result = await mount.pathIsMounted(block_path);
//result = await mount.pathIsMounted(staging_target_path)
result = await mount.pathIsMounted(block_path);
// TODO: use the x-* mount options to detect if we should delete target
try {
result = await mount.pathIsMounted(block_path);
} catch (err) {
/**
* on stalled fs such as nfs, even findmnt will return immediately for the base mount point
* so in the case of timeout here (base mount point and then a file/folder beneath it) we almost certainly are not a block device
* AND the fs is probably stalled
*/
if (err.timeout) {
driver.ctx.logger.warn(
`detected stale mount, attempting to force unmount: ${normalized_staging_path}`
);
await mount.umount(
normalized_staging_path,
umount_args.concat(umount_force_extra_args)
);
result = false; // assume we are *NOT* a block device at this point
} else {
throw err;
}
}
if (result) {
is_block = true;
access_type = "block";
@ -626,7 +756,33 @@ class CsiBaseDriver {
result = await mount.pathIsMounted(normalized_staging_path);
if (result) {
result = await mount.umount(normalized_staging_path, umount_args);
try {
result = await mount.umount(normalized_staging_path, umount_args);
} catch (err) {
if (err.timeout) {
driver.ctx.logger.warn(
`hit timeout waiting to unmount path: ${normalized_staging_path}`
);
result = await mount.getMountDetails(normalized_staging_path);
switch (result.fstype) {
case "nfs":
case "nfs4":
driver.ctx.logger.warn(
`detected stale nfs filesystem, attempting to force unmount: ${normalized_staging_path}`
);
result = await mount.umount(
normalized_staging_path,
umount_args.concat(umount_force_extra_args)
);
break;
default:
throw err;
break;
}
} else {
throw err;
}
}
}
if (is_block) {
@ -666,14 +822,13 @@ class CsiBaseDriver {
session.attached_scsi_devices.host &&
session.attached_scsi_devices.host.devices
) {
is_attached_to_session = session.attached_scsi_devices.host.devices.some(
(device) => {
is_attached_to_session =
session.attached_scsi_devices.host.devices.some((device) => {
if (device.attached_scsi_disk == block_device_info_i.name) {
return true;
}
return false;
}
);
});
}
if (is_attached_to_session) {
@ -749,6 +904,7 @@ class CsiBaseDriver {
}
async NodePublishVolume(call) {
const driver = this;
const mount = new Mount();
const filesystem = new Filesystem();
let result;
@ -758,22 +914,40 @@ class CsiBaseDriver {
const target_path = call.request.target_path;
const capability = call.request.volume_capability;
const access_type = capability.access_type || "mount";
let mount_flags;
let volume_mount_group;
const readonly = call.request.readonly;
const volume_context = call.request.volume_context;
const bind_mount_flags = [];
const node_attach_driver = volume_context.node_attach_driver;
if (access_type == "mount") {
let mount_flags = capability.mount.mount_flags || [];
mount_flags = capability.mount.mount_flags || [];
bind_mount_flags.push(...mount_flags);
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.5.0") &&
driver.options.service.node.capabilities.rpc.includes(
"VOLUME_MOUNT_GROUP"
)
) {
volume_mount_group = capability.mount.volume_mount_group; // in k8s this is derived from the fsgroup in the pod security context
}
}
bind_mount_flags.push("defaults");
// https://github.com/karelzak/util-linux/issues/1429
//bind_mount_flags.push("x-democratic-csi.managed");
//bind_mount_flags.push("x-democratic-csi.published");
if (readonly) bind_mount_flags.push("ro");
// , "x-democratic-csi.ro"
switch (node_attach_driver) {
case "nfs":
case "smb":
case "lustre":
case "iscsi":
// ensure appropriate directories/files
switch (access_type) {
@ -864,17 +1038,65 @@ class CsiBaseDriver {
}
async NodeUnpublishVolume(call) {
const driver = this;
const mount = new Mount();
const filesystem = new Filesystem();
let result;
const volume_id = call.request.volume_id;
const target_path = call.request.target_path;
const umount_args = []; // --force
const umount_args = [];
const umount_force_extra_args = ["--force", "--lazy"];
try {
result = await mount.pathIsMounted(target_path);
} catch (err) {
// running findmnt on non-existant paths return immediately
// the only time this should timeout is on a stale fs
// so if timeout is hit we should be near certain it is indeed mounted
if (err.timeout) {
driver.ctx.logger.warn(
`detected stale mount, attempting to force unmount: ${target_path}`
);
await mount.umount(
target_path,
umount_args.concat(umount_force_extra_args)
);
result = false; // assume we have fully unmounted
} else {
throw err;
}
}
result = await mount.pathIsMounted(target_path);
if (result) {
result = await mount.umount(target_path, umount_args);
try {
result = await mount.umount(target_path, umount_args);
} catch (err) {
if (err.timeout) {
driver.ctx.logger.warn(
`hit timeout waiting to unmount path: ${target_path}`
);
// bind mounts do show the 'real' fs details
result = await mount.getMountDetails(target_path);
switch (result.fstype) {
case "nfs":
case "nfs4":
driver.ctx.logger.warn(
`detected stale nfs filesystem, attempting to force unmount: ${target_path}`
);
result = await mount.umount(
target_path,
umount_args.concat(umount_force_extra_args)
);
break;
default:
throw err;
break;
}
} else {
throw err;
}
}
}
result = await filesystem.pathExists(target_path);
@ -909,7 +1131,7 @@ class CsiBaseDriver {
//VOLUME_CONDITION
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") &&
options.service.node.capabilities.rpc.includes("VOLUME_CONDITION")
driver.options.service.node.capabilities.rpc.includes("VOLUME_CONDITION")
) {
// TODO: let drivers fill ths in
let abnormal = false;
@ -930,7 +1152,11 @@ class CsiBaseDriver {
switch (access_type) {
case "mount":
result = await mount.getMountDetails(device_path);
result = await mount.getMountDetails(device_path, [
"avail",
"size",
"used",
]);
res.usage = [
{

View File

@ -1,5 +1,6 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const semver = require("semver");
/**
* Driver which only runs the node portion and is meant to be used entirely
@ -58,6 +59,21 @@ class NodeManualDriver extends CsiBaseDriver {
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc
.push
//"VOLUME_CONDITION",
//"GET_VOLUME"
();
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc
.push
//"SINGLE_NODE_MULTI_WRITER"
();
}
}
if (!("rpc" in options.service.node.capabilities)) {
@ -69,6 +85,18 @@ class NodeManualDriver extends CsiBaseDriver {
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
@ -87,6 +115,9 @@ class NodeManualDriver extends CsiBaseDriver {
case "smb":
driverResourceType = "filesystem";
fs_types = ["cifs"];
case "lustre":
driverResourceType = "filesystem";
fs_types = ["lustre"];
break;
case "iscsi":
driverResourceType = "volume";
@ -119,6 +150,8 @@ class NodeManualDriver extends CsiBaseDriver {
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
@ -145,6 +178,8 @@ class NodeManualDriver extends CsiBaseDriver {
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",

View File

@ -2,6 +2,7 @@ const fs = require("fs");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const { Filesystem } = require("../../utils/filesystem");
const semver = require("semver");
const SshClient = require("../../utils/ssh").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
@ -81,6 +82,21 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
//"PUBLISH_READONLY",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc
.push
//"VOLUME_CONDITION",
//"GET_VOLUME"
();
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc
.push
//"SINGLE_NODE_MULTI_WRITER"
();
}
}
if (!("rpc" in options.service.node.capabilities)) {
@ -91,6 +107,18 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
"GET_VOLUME_STATS",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
@ -167,6 +195,8 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
].includes(capability.access_mode.mode)
) {
@ -192,6 +222,8 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
].includes(capability.access_mode.mode)
) {

View File

@ -326,9 +326,14 @@ class Filesystem {
try {
result = await filesystem.exec("blkid", args);
} catch (err) {
if (err.code == 2 && err.stderr.includes("No such device or address")) {
throw err;
}
if (err.code == 2) {
return false;
}
throw err;
}
@ -426,12 +431,13 @@ class Filesystem {
// echo 1 > /sys/block/sdb/device/rescan
const sys_file = `/sys/block/${device_name}/device/rescan`;
console.log(`executing filesystem command: echo 1 > ${sys_file}`);
fs.writeFileSync(sys_file, "1");
}
}
/**
* expand a give filesystem
* expand a given filesystem
*
* @param {*} device
* @param {*} fstype
@ -474,7 +480,7 @@ class Filesystem {
}
/**
* expand a give filesystem
* check a given filesystem
*
* fsck [options] -- [fs-options] [<filesystem> ...]
*
@ -593,7 +599,7 @@ class Filesystem {
args.unshift(command);
command = filesystem.options.paths.sudo;
}
console.log("executing fileystem command: %s %s", command, args.join(" "));
console.log("executing filesystem command: %s %s", command, args.join(" "));
const child = filesystem.options.executor.spawn(command, args, options);
let didTimeout = false;
@ -614,10 +620,16 @@ class Filesystem {
});
child.on("close", function (code) {
const result = { code, stdout, stderr };
const result = { code, stdout, stderr, timeout: false };
if (timeout) {
clearTimeout(timeout);
}
if (code === null) {
result.timeout = true;
reject(result);
}
if (code) {
console.log(
"failed to execute filesystem command: %s, response: %j",

View File

@ -1,13 +1,17 @@
const cp = require("child_process");
const { Filesystem } = require("../utils/filesystem");
// avoid using avail,size,used as it causes hangs when the fs is stale
FINDMNT_COMMON_OPTIONS = [
"--output",
"source,target,fstype,label,options,avail,size,used",
"source,target,fstype,label,options",
"-b",
"-J"
"-J",
"--nofsroot", // prevents unwanted behavior with cifs volumes
];
DEFAUT_TIMEOUT = 30000;
class Mount {
constructor(options = {}) {
const mount = this;
@ -36,7 +40,7 @@ class Mount {
if (!options.executor) {
options.executor = {
spawn: cp.spawn
spawn: cp.spawn,
};
}
}
@ -141,11 +145,18 @@ class Mount {
*
* @param {*} path
*/
async getMountDetails(path) {
async getMountDetails(path, extraOutputProperties = [], extraArgs = []) {
const mount = this;
let args = [];
const common_options = JSON.parse(JSON.stringify(FINDMNT_COMMON_OPTIONS));
if (extraOutputProperties.length > 0) {
common_options[1] =
common_options[1] + "," + extraOutputProperties.join(",");
}
args = args.concat(["--mountpoint", path]);
args = args.concat(FINDMNT_COMMON_OPTIONS);
args = args.concat(common_options);
args = args.concat(extraArgs);
let result;
try {
@ -157,6 +168,94 @@ class Mount {
}
}
/**
* parse a mount options string into an array
*
* @param {*} options
* @returns
*/
async parseMountOptions(options) {
if (!options) {
return [];
}
if (Array.isArray(options)) {
return options;
}
options = options.split(",");
return options;
}
/**
* Given the set of mount options and sought after option, return true if the option is present
*
* @param {*} options
* @param {*} option
* @returns
*/
async getMountOptionPresent(options, option) {
const mount = this;
if (!Array.isArray(options)) {
options = await mount.parseMountOptions(options);
}
for (let i of options) {
let parts = i.split("=", 2);
if (parts[0] == option) {
return true;
}
}
return false;
}
/**
* Get the value of the given mount option
*
* if the mount option is present by has no value null is returned
* if the mount option is NOT present undefined is returned
* is the mount option has a value that value is returned
*
* @param {*} options
* @param {*} option
* @returns
*/
async getMountOptionValue(options, option) {
const mount = this;
if (!Array.isArray(options)) {
options = await mount.parseMountOptions(options);
}
for (let i of options) {
let parts = i.split("=", 2);
if (parts[0] == option) {
if (typeof parts[1] === "undefined") {
return null;
} else {
return parts[1];
}
}
}
return undefined;
}
/**
* Get mount optsion for a given path
*
* @param {*} path
* @returns Array
*/
async getMountOptions(path) {
const mount = this;
let details = await mount.getMountDetails(path, [], ["-m"]);
return await mount.parseMountOptions(details.options);
}
/**
* Get the device (source) at the given mount point
*
@ -179,8 +278,8 @@ class Mount {
/**
* very specifically looking for *devices* vs *filesystems/directories* which were bind mounted
*
* @param {*} path
*
* @param {*} path
*/
async isBindMountedBlockDevice(path) {
const filesystem = new Filesystem();
@ -278,7 +377,11 @@ class Mount {
return true;
}
exec(command, args, options) {
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAUT_TIMEOUT;
}
const mount = this;
args = args || [];
@ -290,9 +393,22 @@ class Mount {
args.unshift(command);
command = mount.options.paths.sudo;
}
console.log("executing mount command: %s %s", command, args.join(" "));
// https://regex101.com/r/FHIbcw/3
// replace password=foo with password=redacted
// (?<=password=)(?:([\"'])(?:\\\1|.)*?\1|[^,\s]+)
const regex = /(?<=password=)(?:([\"'])(?:\\\1|.)*?\1|[^,\s]+)/gi;
const cleansedLog = `${command} ${args.join(" ")}`.replace(
regex,
"redacted"
);
console.log("executing mount command: %s", cleansedLog);
const child = mount.options.executor.spawn(command, args, options);
/**
* timeout option natively supported since v16
* TODO: properly handle this based on nodejs version
*/
let didTimeout = false;
if (options && options.timeout) {
timeout = setTimeout(() => {
@ -302,19 +418,27 @@ class Mount {
}
return new Promise((resolve, reject) => {
child.stdout.on("data", function(data) {
child.stdout.on("data", function (data) {
stdout = stdout + data;
});
child.stderr.on("data", function(data) {
child.stderr.on("data", function (data) {
stderr = stderr + data;
});
child.on("close", function(code) {
const result = { code, stdout, stderr };
child.on("close", function (code) {
const result = { code, stdout, stderr, timeout: false };
if (timeout) {
clearTimeout(timeout);
}
// timeout scenario
if (code === null) {
result.timeout = true;
reject(result);
}
if (code) {
reject(result);
} else {