Refactor to TrueNAS SCALE 25.04+ WebSocket JSON-RPC API only

BREAKING CHANGES:
- Removed support for all systems except TrueNAS SCALE 25.04+
- Removed SSH-based driver (FreeNASSshDriver) and all SSH functionality
- Removed legacy HTTP REST API support (API v1.0 and v2.0)
- Removed generic ZFS, Synology, and other non-TrueNAS drivers
- Migrated to WebSocket JSON-RPC 2.0 protocol exclusively

Changes:
- Implemented new WebSocket JSON-RPC client using 'ws' package
- Completely rewrote API wrapper (reduced from 4,469 to 468 lines)
- Removed all legacy version detection and compatibility code
- Updated driver factory to only support truenas-nfs, truenas-iscsi, truenas-nvmeof
- Removed ssh2 and axios dependencies, added ws dependency
- Deleted 22 example configuration files for unsupported systems
- Deleted 15 driver implementation files for unsupported systems

The new implementation provides a clean, modern interface to TrueNAS SCALE
using versioned JSON-RPC over WebSocket (/api/current endpoint).
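
For reference, each call over the /api/current socket is plain JSON-RPC 2.0
framing. An illustrative request/response exchange (example values only):

--> {"jsonrpc": "2.0", "id": 1, "method": "pool.dataset.query", "params": [[["name", "=", "tank/k8s/vols"]]]}
<-- {"jsonrpc": "2.0", "id": 1, "result": [{"name": "tank/k8s/vols", "type": "FILESYSTEM"}]}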

API methods now use direct JSON-RPC calls (see the sketch following this list):
- pool.dataset.* for dataset operations
- zfs.snapshot.* for snapshot operations
- iscsi.* for iSCSI configuration (to be implemented)
- sharing.nfs.* for NFS shares (to be implemented)
- nvmet.* for NVMe-oF (to be implemented)
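
As a rough sketch of how such calls can be issued from Node.js with the 'ws'
package (this is not the driver's actual client; the host, API key, and the
auth.login_with_api_key / pool.dataset.query methods shown are assumptions to
adapt to your environment):

const WebSocket = require("ws");

const ws = new WebSocket("wss://truenas.example.com/api/current");

let nextId = 0;
const pending = new Map();

// send a JSON-RPC 2.0 request and resolve with its result
function call(method, params = []) {
  return new Promise((resolve, reject) => {
    const id = ++nextId;
    pending.set(id, { resolve, reject });
    ws.send(JSON.stringify({ jsonrpc: "2.0", id, method, params }));
  });
}

ws.on("message", (data) => {
  const msg = JSON.parse(data);
  const req = pending.get(msg.id);
  if (!req) return; // ignore server-initiated notifications/events
  pending.delete(msg.id);
  msg.error ? req.reject(msg.error) : req.resolve(msg.result);
});

ws.on("open", async () => {
  await call("auth.login_with_api_key", ["<api-key>"]); // assumed auth method
  const datasets = await call("pool.dataset.query", [[["name", "^", "tank/k8s"]]]);
  console.log(datasets.map((d) => d.name));
  ws.close();
});
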
Claude 2025-11-24 18:58:36 +00:00
parent 8a4a28a87f
commit a04d5eebe6
41 changed files with 672 additions and 13487 deletions


@ -1,100 +0,0 @@
driver: freenas-iscsi
instance_id:
httpConnection:
protocol: http
host: server address
port: 80
# use only 1 of apiKey or username/password
# if both are present, apiKey is preferred
# apiKey is only available starting in TrueNAS-12
#apiKey:
username: root
password:
allowInsecure: true
# use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
# leave unset for auto-detection
#apiVersion: 2
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
#
# leave paths unset for auto-detection
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
# total volume name (zvol/<datasetParentName>/<pvc name>) length cannot exceed 63 chars
# https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
# standard volume naming overhead is 46 chars
# datasetParentName should therefore be 17 chars or less when using TrueNAS 12 or below
datasetParentName: tank/k8s/b/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
iscsi:
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface:
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
#nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix: csi-
nameSuffix: "-clustera"
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
# https://github.com/democratic-csi/democratic-csi/issues/302
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
- targetGroupPortalGroup: 1
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1
# None, CHAP, or CHAP Mutual
targetGroupAuthType: None
# get the correct ID from the "Authorized Access" section of the UI
# only required if using Chap
targetGroupAuthGroup:
#extentCommentTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
extentInsecureTpc: true
extentXenCompat: false
extentDisablePhysicalBlocksize: true
# 512, 1024, 2048, or 4096,
extentBlocksize: 512
# "" (let FreeNAS decide, currently defaults to SSD), Unknown, SSD, 5400, 7200, 10000, 15000
extentRpm: "SSD"
# 0-100 (0 == ignore)
extentAvailThreshold: 0


@ -1,70 +0,0 @@
driver: freenas-nfs
instance_id:
httpConnection:
protocol: http
host: server address
port: 80
# use only 1 of apiKey or username/password
# if both are present, apiKey is preferred
# apiKey is only available starting in TrueNAS-12
#apiKey:
username: root
password:
allowInsecure: true
# use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
# leave unset for auto-detection
#apiVersion: 2
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
#
# leave paths unset for auto-detection
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
#datasetPermissionsAcls:
#- "-m everyone@:full_set:allow"
#- "-m u:kube:full_set:allow"
nfs:
#shareCommentTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
shareHost: server address
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: wheel
shareMapallUser: ""
shareMapallGroup: ""


@ -1,119 +0,0 @@
driver: freenas-smb
instance_id:
httpConnection:
protocol: http
host: server address
port: 80
# use only 1 of apiKey or username/password
# if both are present, apiKey is preferred
# apiKey is only available starting in TrueNAS-12
#apiKey:
username: root
password:
allowInsecure: true
# use apiVersion 2 for TrueNAS-12 and up (will work on 11.x in some scenarios as well)
# leave unset for auto-detection
#apiVersion: 2
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
#
# leave paths unset for auto-detection
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetProperties:
aclmode: restricted
aclinherit: passthrough
acltype: nfsv4
casesensitivity: insensitive
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
# as appropriate create a dedicated user for smb connections
# and set this
datasetPermissionsUser: 65534
datasetPermissionsGroup: 65534
# CORE
#datasetPermissionsAclsBinary: setfacl
# SCALE
#datasetPermissionsAclsBinary: nfs4xdr_setfacl
# if using a user other than guest/nobody comment the 'everyone@' acl
# and uncomment the appropriate block below
datasetPermissionsAcls:
- "-m everyone@:full_set:fd:allow"
# CORE
# in CORE you cannot have multiple entries for the same principal
# or said differently, they are declarative so using -m will replace
# whatever the current value is for the principal rather than adding an
# entry in the acl list
#- "-m g:builtin_users:full_set:fd:allow"
#- "-m group@:modify_set:fd:allow"
#- "-m owner@:full_set:fd:allow"
# SCALE
# https://www.truenas.com/community/threads/get-setfacl-on-scale-with-nfsv4-acls.95231/
# -s replaces everything
# so we put this in specific order to mimic the defaults of SCALE when using the api
#- -s group:builtin_users:full_set:fd:allow
#- -a group:builtin_users:modify_set:fd:allow
#- -a group@:modify_set:fd:allow
#- -a owner@:full_set:fd:allow
smb:
shareHost: server address
nameTemplate: ""
namePrefix: ""
nameSuffix: ""
# if any of the shareFoo parameters do not work with your version of FreeNAS
# simply comment the param (and use the configuration template if necessary)
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: true
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:


@ -1,59 +0,0 @@
driver: local-hostpath
instance_id:
local-hostpath:
# generally shareBasePath and controllerBasePath should be the same for this
# driver, this path should be mounted into the csi-driver container
shareBasePath: "/var/lib/csi-local-hostpath"
controllerBasePath: "/var/lib/csi-local-hostpath"
dirPermissionsMode: "0777"
dirPermissionsUser: 0
dirPermissionsGroup: 0
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# of local-hostpath the node name will be appended
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# of local-hostpath the node name will be appended
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}


@ -1,58 +0,0 @@
driver: lustre-client
instance_id:
lustre:
# <MGS NID>[:<MGS NID>]
shareHost: server address
shareBasePath: "/some/path"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/storage"
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# backup hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}


@ -1,57 +0,0 @@
driver: nfs-client
instance_id:
nfs:
shareHost: server address
shareBasePath: "/some/path"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/storage"
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}


@ -1,40 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: iscsi-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions: []
csi:
driver: org.democratic-csi.node-manual
readOnly: false
# can be ext4 or xfs
fsType: ext4
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
# can be used to handle CHAP
# in the secret create the following keys:
#
# # any arbitrary iscsiadm entries can be add by creating keys starting with node-db.<entry.name>
# # if doing CHAP
# node-db.node.session.auth.authmethod: CHAP
# node-db.node.session.auth.username: foo
# node-db.node.session.auth.password: bar
#
# # if doing mutual CHAP
# node-db.node.session.auth.username_in: baz
# node-db.node.session.auth.password_in: bar
#nodeStageSecretRef:
# name: some name
# namespace: some namespace
volumeAttributes:
portal: <ip:port>
#portals: <ip:port>,<ip:port>,...
iqn: <iqn>
lun: <lun>
node_attach_driver: iscsi
provisioner_driver: node-manual


@ -1,25 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nfs-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
- nfsvers=3
- nolock
- noatime
csi:
driver: org.democratic-csi.node-manual
readOnly: false
fsType: nfs
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
volumeAttributes:
server: host or ip
share: /some/share
node_attach_driver: nfs
provisioner_driver: node-manual


@ -1,26 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nvmeof-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions: []
csi:
driver: org.democratic-csi.node-manual
readOnly: false
# can be ext4 or xfs
fsType: ext4
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
volumeAttributes:
# rdma and fc are also available
transport: tcp://<ip:port>,
#transports: <transport>,<transport>,...
nqn: <nqn>
nsid: <nsid>
node_attach_driver: "nvmeof"
provisioner_driver: node-manual


@ -1,51 +0,0 @@
---
apiVersion: v1
kind: Secret
metadata:
name: objectivefs-secret
namespace: kube-system
stringData:
# these can be defined here OR in volumeAttributes
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
"env.OBJECTSTORE": ""
"env.ACCESS_KEY": ""
"env.SECRET_KEY": ""
"env.OBJECTIVEFS_PASSPHRASE": ""
# does NOT need admin key appended for node-manual operations
"env.OBJECTIVEFS_LICENSE": ""
"env.ENDPOINT": ""
# ...
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: objectivefs-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
[]
# https://objectivefs.com/userguide#mount
#- nodiratime
#- noatime
#- fsavail=<size>
csi:
driver: org.democratic-csi.node-manual
readOnly: false
fsType: objectivefs
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
nodeStageSecretRef:
name: objectivefs-secret
namespace: kube-system
volumeAttributes:
node_attach_driver: objectivefs
provisioner_driver: node-manual
filesystem: "ofs/test"
# these can be defined here OR in the secret referenced above
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
#"env.OBJECTSTORE": "minio://"
#"env.ACCESS_KEY": ""
# ...


@ -1,29 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: smb-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
# creds can be entered into the node-stage-secret in the `mount_flags` key
# the value should be: username=foo,password=bar
- username=foo
- password=bar
csi:
driver: org.democratic-csi.node-manual
readOnly: false
fsType: cifs
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
#nodeStageSecretRef:
# name: some name
# namespace: some namespace
volumeAttributes:
server: host or ip
share: someshare
node_attach_driver: smb
provisioner_driver: node-manual


@ -1,2 +0,0 @@
driver: node-manual


@ -1,32 +0,0 @@
driver: objectivefs
objectivefs:
# note, ALL provisioned filesystems will be created in this pool / bucket
# with the same passphrase entered below
#
# in general this pool should be considered as fully managed by democratic-csi
# so a dedicated pool per-cluster / deployment would be best practice
#
pool: ofscsi
cli:
sudoEnabled: false
env:
# NOTE: this must be the license key + admin key
# admin key feature must be activated on your account
# https://objectivefs.com/howto/objectivefs-admin-key-setup
OBJECTIVEFS_LICENSE:
OBJECTSTORE:
ENDPOINT:
SECRET_KEY:
ACCESS_KEY:
# do NOT change this once it has been set and deployed
OBJECTIVEFS_PASSPHRASE:
# ...
_private:
csi:
volume:
idHash:
# due to 63 char limit on objectivefs fs name, we should
# hash volume names to prevent fs names which are too long
# can be 1 of md5, crc8, crc16, crc32
strategy: crc32


@ -1,57 +0,0 @@
driver: smb-client
instance_id:
smb:
shareHost: server address
shareBasePath: "someshare/path"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/storage"
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}


@ -1,94 +0,0 @@
driver: synology-iscsi
httpConnection:
protocol: http
host: server address
port: 5000
username: admin
password: password
allowInsecure: true
# should be unique across all installs to the same NAS
session: "democratic-csi"
serialize: true
# Choose the DSM volume this driver operates on. The default value is /volume1.
# synology:
# volume: /volume1
iscsi:
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface: ""
# can be whatever you would like
baseiqn: "iqn.2000-01.com.synology:csi."
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
namePrefix: ""
nameSuffix: ""
# documented below are several blocks
# pick the option appropriate for you based on what your backing fs is and desired features
# you do not need to alter dev_attribs under normal circumstances but they may be altered in advanced use-cases
# These options can also be configured per storage-class:
# See https://github.com/democratic-csi/democratic-csi/blob/master/docs/storage-class-parameters.md
lunTemplate:
# can be static value or handlebars template
#description: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# btrfs thin provisioning
type: "BLUN"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
# 3pc = Hardware-assisted data transfer
# tpu = Space reclamation
# can_snapshot = Snapshot
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
#- dev_attrib: emulate_3pc
# enable: 1
#- dev_attrib: emulate_tpu
# enable: 0
#- dev_attrib: can_snapshot
# enable: 1
# btrfs thick provisioning
# only zeroing and locking supported
#type: "BLUN_THICK"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
# ext4 thin provisioning, the UI sends everything with enabled=0
#type: "THIN"
# ext4 thin with advanced legacy features set
# can only alter tpu (all others are set as enabled=1)
#type: "ADV"
#dev_attribs:
#- dev_attrib: emulate_tpu
# enable: 1
# ext4 thick
# can only alter caw
#type: "FILE"
#dev_attribs:
#- dev_attrib: emulate_caw
# enable: 1
lunSnapshotTemplate:
is_locked: true
# https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot
is_app_consistent: true
targetTemplate:
auth_type: 0
max_sessions: 0


@ -1,89 +0,0 @@
driver: zfs-generic-iscsi
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
iscsi:
shareStrategy: "targetCli"
# https://kifarunix.com/how-to-install-and-configure-iscsi-storage-server-on-ubuntu-18-04/
# https://kifarunix.com/how-install-and-configure-iscsi-storage-server-on-centos-7/
# https://linuxlasse.net/linux/howtos/ISCSI_and_ZFS_ZVOL
# http://www.linux-iscsi.org/wiki/ISCSI
# https://bugzilla.redhat.com/show_bug.cgi?id=1659195
# http://atodorov.org/blog/2015/04/07/how-to-configure-iscsi-target-on-red-hat-enterprise-linux-7/
shareStrategyTargetCli:
#sudoEnabled: true
basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664"
tpg:
attributes:
# set to 1 to enable CHAP
authentication: 0
# this is required currently as we do not register all node iqns
# the effective outcome of this is, allow all iqns to connect
generate_node_acls: 1
cache_dynamic_acls: 1
# if generate_node_acls is 1 then must turn this off as well (assuming you want write ability)
demo_mode_write_protect: 0
auth:
# CHAP
#userid: "foo"
#password: "bar"
# mutual CHAP
#mutual_userid: "baz"
#mutual_password: "bar"
block:
attributes:
# set to 1 to enable Thin Provisioning Unmap
emulate_tpu: 0
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
# leave empty to omit usage of -I with iscsiadm
interface: ""
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
#nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix:
nameSuffix:


@ -1,55 +0,0 @@
driver: zfs-generic-nfs
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
#datasetPermissionsAcls:
#- "-m everyone@:full_set:allow"
#- "-m u:kube:full_set:allow"
nfs:
# https://docs.oracle.com/cd/E23824_01/html/821-1448/gayne.html
# https://www.hiroom2.com/2016/05/18/ubuntu-16-04-share-zfs-storage-via-nfs-smb/
shareStrategy: "setDatasetProperties"
shareStrategySetDatasetProperties:
properties:
#sharenfs: "rw,no_subtree_check,no_root_squash"
sharenfs: "on"
# share: ""
shareHost: "server address"


@ -1,103 +0,0 @@
driver: zfs-generic-nvmeof
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
nvmeof:
# these are for the node/client aspect
transports:
- tcp://server:port
#- "tcp://127.0.0.1:4420?host-iface=eth0"
#- "tcp://[2001:123:456::1]:4420"
#- "rdma://127.0.0.1:4420"
#- "fc://[nn-0x203b00a098cbcac6:pn-0x203d00a098cbcac6]"
# MUST ensure uniqueness
# full iqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
#nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix:
nameSuffix:
shareStrategy: "nvmetCli"
#shareStrategy: "spdkCli"
# https://documentation.suse.com/es-es/sles/15-SP1/html/SLES-all/cha-nvmeof.html
# https://www.linuxjournal.com/content/data-flash-part-iii-nvme-over-fabrics-using-tcp
# http://git.infradead.org/users/hch/nvmetcli.git
shareStrategyNvmetCli:
#sudoEnabled: true
# /root/.local/bin/nvmetcli
#nvmetcliPath: nvmetcli
# prevent startup race conditions by ensuring the config on disk has been imported
# before we start messing with things
#configIsImportedFilePath: /var/run/nvmet-config-loaded
#configPath: /etc/nvmet/config.json
basename: "nqn.2003-01.org.linux-nvme"
# add more ports here as appropriate if you have multipath
ports:
- "1"
subsystem:
attributes:
allow_any_host: 1
# not supported yet in nvmetcli
#namespace:
# attributes:
# buffered_io: 1
shareStrategySpdkCli:
# spdkcli.py
#spdkcliPath: spdkcli
configPath: /etc/spdk/spdk.json
basename: "nqn.2003-01.org.linux-nvmeof"
bdev:
type: uring
#type: aio
attributes:
block_size: 512
subsystem:
attributes:
allow_any_host: "true"
listeners:
- trtype: tcp
traddr: server
trsvcid: port
adrfam: ipv4


@ -1,58 +0,0 @@
driver: zfs-generic-smb
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
datasetProperties:
#aclmode: restricted
#aclinherit: passthrough
#acltype: nfsv4
casesensitivity: insensitive
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: smbroot
datasetPermissionsGroup: smbroot
#datasetPermissionsAclsBinary: nfs4_setfacl
#datasetPermissionsAcls:
#- "-m everyone@:full_set:allow"
#- -s group@:modify_set:fd:allow
#- -a owner@:full_set:fd:allow
smb:
# https://docs.oracle.com/cd/E23824_01/html/821-1448/gayne.html
# https://www.hiroom2.com/2016/05/18/ubuntu-16-04-share-zfs-storage-via-nfs-smb/
shareStrategy: "setDatasetProperties"
shareStrategySetDatasetProperties:
properties:
sharesmb: "on"
# share: ""
shareHost: "server address"


@ -1,11 +0,0 @@
driver: zfs-local-dataset
zfs:
datasetParentName: tank/k8s/local/v
detachedSnapshotsDatasetParentName: tank/k8s/local/s
datasetProperties:
# key: value
datasetEnableQuotas: true
datasetEnableReservation: false


@ -1,12 +0,0 @@
driver: zfs-local-ephemeral-inline
zfs:
#chroot: "/host"
datasetParentName: tank/k8s/inline
properties:
# add any arbitrary properties you want here
#refquota:
# value: 10M
# allowOverride: false # default is to allow inline settings to override
#refreservation:
# value: 5M
# ...


@ -1,13 +0,0 @@
driver: zfs-local-zvol
zfs:
datasetParentName: tank/k8s/local/v
detachedSnapshotsDatasetParentName: tank/k8s/local/s
datasetProperties:
# key: value
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:


@ -22,7 +22,6 @@
"@grpc/proto-loader": "^0.7.0",
"@kubernetes/client-node": "^0.18.0",
"async-mutex": "^0.4.0",
"axios": "^1.1.3",
"bunyan": "^1.8.15",
"crc": "^4.3.2",
"fs-extra": "^11.1.0",
@ -32,10 +31,10 @@
"lru-cache": "^7.4.0",
"prompt": "^1.2.2",
"semver": "^7.3.4",
"ssh2": "^1.1.0",
"uri-js": "^4.4.1",
"uuid": "^9.0.0",
"winston": "^3.6.0",
"ws": "^8.14.0",
"yargs": "^17.0.1"
},
"devDependencies": {

File diff suppressed because it is too large


@ -1,92 +0,0 @@
const _ = require("lodash");
const { ControllerClientCommonDriver } = require("../controller-client-common");
const NODE_TOPOLOGY_KEY_NAME = "org.democratic-csi.topology/node";
/**
* Crude local-hostpath driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
class ControllerLocalHostpathDriver extends ControllerClientCommonDriver {
constructor(ctx, options) {
const i_caps = _.get(
options,
"service.identity.capabilities.service",
false
);
const c_caps = _.get(options, "service.controller.capabilities", false);
super(...arguments);
if (!i_caps) {
this.ctx.logger.debug("setting local-hostpath identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
"VOLUME_ACCESSIBILITY_CONSTRAINTS",
];
}
if (!c_caps) {
this.ctx.logger.debug("setting local-hostpath controller service caps");
if (
!options.service.controller.capabilities.rpc.includes("GET_CAPACITY")
) {
options.service.controller.capabilities.rpc.push("GET_CAPACITY");
}
}
}
getConfigKey() {
return "local-hostpath";
}
getVolumeContext(volume_id) {
const driver = this;
return {
node_attach_driver: "hostpath",
path: driver.getShareVolumePath(volume_id),
};
}
getFsTypes() {
return [];
}
/**
* List of topologies associated with the *volume*
*
* @returns array
*/
async getAccessibleTopology() {
const response = await super.NodeGetInfo(...arguments);
return [
{
segments: {
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
},
},
];
}
/**
* Add node topologies
*
* @param {*} call
* @returns
*/
async NodeGetInfo(call) {
const response = await super.NodeGetInfo(...arguments);
response.accessible_topology = {
segments: {
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
},
};
return response;
}
}
module.exports.ControllerLocalHostpathDriver = ControllerLocalHostpathDriver;


@ -1,31 +0,0 @@
const { ControllerClientCommonDriver } = require("../controller-client-common");
/**
* Crude lustre-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
class ControllerLustreClientDriver extends ControllerClientCommonDriver {
constructor(ctx, options) {
super(...arguments);
}
getConfigKey() {
return "lustre";
}
getVolumeContext(volume_id) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "lustre",
server: this.options[config_key].shareHost,
share: driver.getShareVolumePath(volume_id),
};
}
getFsTypes() {
return ["lustre"];
}
}
module.exports.ControllerLustreClientDriver = ControllerLustreClientDriver;


@ -1,31 +0,0 @@
const { ControllerClientCommonDriver } = require("../controller-client-common");
/**
* Crude nfs-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
class ControllerNfsClientDriver extends ControllerClientCommonDriver {
constructor(ctx, options) {
super(...arguments);
}
getConfigKey() {
return "nfs";
}
getVolumeContext(volume_id) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "nfs",
server: this.options[config_key].shareHost,
share: driver.getShareVolumePath(volume_id),
};
}
getFsTypes() {
return ["nfs"];
}
}
module.exports.ControllerNfsClientDriver = ControllerNfsClientDriver;


@ -1,670 +0,0 @@
const _ = require("lodash");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const { ObjectiveFS } = require("../../utils/objectivefs");
const semver = require("semver");
const uuidv4 = require("uuid").v4;
const __REGISTRY_NS__ = "ControllerZfsLocalDriver";
const MAX_VOLUME_NAME_LENGTH = 63;
class ControllerObjectiveFSDriver extends CsiBaseDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
//"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
"LIST_VOLUMES",
//"GET_CAPACITY",
//"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
//"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc
.push
//"VOLUME_CONDITION",
//"GET_VOLUME"
();
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc.push(
"SINGLE_NODE_MULTI_WRITER"
);
}
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
async getObjectiveFSClient() {
const driver = this;
return this.ctx.registry.getAsync(
`${__REGISTRY_NS__}:objectivefsclient`,
async () => {
const options = {};
options.sudo = _.get(
driver.options,
"objectivefs.cli.sudoEnabled",
false
);
options.pool = _.get(driver.options, "objectivefs.pool");
return new ObjectiveFS({
...options,
env: _.get(driver.options, "objectivefs.env", {}),
});
}
);
}
/**
*
* @returns Array
*/
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
getFsTypes() {
return ["fuse.objectivefs", "objectivefs"];
}
assertCapabilities(capabilities) {
const driver = this;
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
let fs_types = driver.getFsTypes();
const valid = capabilities.every((capability) => {
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!fs_types.includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
!this.getAccessModes(capability).includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
});
return { valid, message };
}
async getVolumeStatus(entry) {
const driver = this;
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
const volume_id = entry.NAME.replace(object_store, "").split("/")[1];
if (!!!semver.satisfies(driver.ctx.csiVersion, ">=1.2.0")) {
return;
}
let abnormal = false;
let message = "OK";
let volume_status = {};
//LIST_VOLUMES_PUBLISHED_NODES
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.2.0") &&
driver.options.service.controller.capabilities.rpc.includes(
"LIST_VOLUMES_PUBLISHED_NODES"
)
) {
// TODO: let drivers fill this in
volume_status.published_node_ids = [];
}
//VOLUME_CONDITION
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") &&
driver.options.service.controller.capabilities.rpc.includes(
"VOLUME_CONDITION"
)
) {
// TODO: let drivers fill this in
volume_condition = { abnormal, message };
volume_status.volume_condition = volume_condition;
}
return volume_status;
}
async populateCsiVolumeFromData(entry) {
const driver = this;
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
let filesystem = entry.NAME.replace(object_store, "");
let volume_content_source;
let volume_context = {
provisioner_driver: driver.options.driver,
node_attach_driver: "objectivefs",
filesystem,
object_store,
"env.OBJECTSTORE": object_store,
};
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
let accessible_topology;
let volume = {
volume_id: filesystem.split("/")[1],
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
accessible_topology,
};
return volume;
}
/**
* Ensure sane options are used etc
* true = ready
* false = not ready, but progressing towards ready
* throw error = faulty setup
*
* @param {*} call
*/
async Probe(call) {
const driver = this;
const pool = _.get(driver.options, "objectivefs.pool");
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
if (driver.ctx.args.csiMode.includes("controller")) {
if (!pool) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`objectivefs.pool not configured`
);
}
if (!object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`env.OBJECTSTORE not configured`
);
}
return { ready: { value: true } };
} else {
return { ready: { value: true } };
}
}
/**
* Create an objectivefs filesystem as a new volume
*
* @param {*} call
*/
async CreateVolume(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
const parameters = call.request.parameters;
if (!pool) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`objectivefs.pool not configured`
);
}
if (!object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`env.OBJECTSTORE not configured`
);
}
const context_env = {};
for (const key in parameters) {
if (key.startsWith("env.")) {
context_env[key] = parameters[key];
}
}
context_env["env.OBJECTSTORE"] = object_store;
// filesystem names are always lower-cased by ofs
let volume_id = await driver.getVolumeIdFromCall(call);
let volume_content_source = call.request.volume_content_source;
volume_id = volume_id.toLowerCase();
const filesystem = `${pool}/${volume_id}`;
if (volume_id.length >= MAX_VOLUME_NAME_LENGTH) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`derived volume_id ${volume_id} is too long for objectivefs`
);
}
if (
call.request.volume_capabilities &&
call.request.volume_capabilities.length > 0
) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
} else {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
"missing volume_capabilities"
);
}
if (
!call.request.capacity_range ||
Object.keys(call.request.capacity_range).length === 0
) {
call.request.capacity_range = {
required_bytes: 1073741824, // meaningless
};
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greater than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
if (volume_content_source) {
//should never happen, cannot clone with this driver
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`cloning is not enabled`
);
}
await ofsClient.create({}, filesystem, ["-f"]);
let volume_context = {
provisioner_driver: driver.options.driver,
node_attach_driver: "objectivefs",
filesystem,
...context_env,
};
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
* Delete a volume
*
* Deleting a volume consists of the following steps:
* 1. delete directory
*
* @param {*} call
*/
async DeleteVolume(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
let volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
volume_id = volume_id.toLowerCase();
const filesystem = `${pool}/${volume_id}`;
await ofsClient.destroy({}, filesystem, []);
return {};
}
/**
*
* @param {*} call
*/
async ControllerExpandVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
let entries = [];
let entries_length = 0;
let next_token;
let uuid;
let response;
const max_entries = call.request.max_entries;
const starting_token = call.request.starting_token;
// get data from cache and return immediately
if (starting_token) {
let parts = starting_token.split(":");
uuid = parts[0];
let start_position = parseInt(parts[1]);
let end_position;
if (max_entries > 0) {
end_position = start_position + max_entries;
}
entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`);
if (entries) {
entries_length = entries.length;
entries = entries.slice(start_position, end_position);
if (max_entries > 0 && end_position > entries_length) {
next_token = `${uuid}:${end_position}`;
} else {
next_token = null;
}
const data = {
entries: entries,
next_token: next_token,
};
return data;
} else {
throw new GrpcError(
grpc.status.ABORTED,
`invalid starting_token: ${starting_token}`
);
}
}
entries = [];
const list_entries = await ofsClient.list({});
for (const entry of list_entries) {
if (entry.KIND != "ofs") {
continue;
}
let volume = await driver.populateCsiVolumeFromData(entry);
if (volume) {
let status = await driver.getVolumeStatus(entry);
entries.push({
volume,
status,
});
}
}
if (max_entries && entries.length > max_entries) {
uuid = uuidv4();
this.ctx.cache.set(`ListVolumes:result:${uuid}`, entries);
next_token = `${uuid}:${max_entries}`;
entries = entries.slice(0, max_entries);
}
const data = {
entries: entries,
next_token: next_token,
};
return data;
}
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async CreateSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
* In addition, if clones have been created from a snapshot, then they must
* be destroyed before the snapshot can be destroyed.
*
* @param {*} call
*/
async DeleteSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
const volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing volume_id`);
}
const filesystem = `${pool}/${volume_id}`;
const entries = await ofsClient.list({}, filesystem);
const exists = entries.some((entry) => {
return entry.NAME.endsWith(filesystem) && entry.KIND == "ofs";
});
if (!exists) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_id: ${volume_id}`
);
}
const capabilities = call.request.volume_capabilities;
if (!capabilities || capabilities.length === 0) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing capabilities`);
}
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ControllerObjectiveFSDriver = ControllerObjectiveFSDriver;


@ -1,31 +0,0 @@
const { ControllerClientCommonDriver } = require("../controller-client-common");
/**
* Crude smb-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
*/
class ControllerSmbClientDriver extends ControllerClientCommonDriver {
constructor(ctx, options) {
super(...arguments);
}
getConfigKey() {
return "smb";
}
getVolumeContext(volume_id) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "smb",
server: this.options[config_key].shareHost,
share: driver.stripLeadingSlash(driver.getShareVolumePath(volume_id)),
};
}
getFsTypes() {
return ["cifs"];
}
}
module.exports.ControllerSmbClientDriver = ControllerSmbClientDriver;


@ -1,712 +0,0 @@
const _ = require("lodash");
const http = require("http");
const https = require("https");
const { axios_request, stringify } = require("../../../utils/general");
const Mutex = require("async-mutex").Mutex;
const { GrpcError, grpc } = require("../../../utils/grpc");
const USER_AGENT = "democratic-csi";
const __REGISTRY_NS__ = "SynologyHttpClient";
SYNO_ERRORS = {
400: {
status: grpc.status.UNAUTHENTICATED,
message: "Failed to authenticate to the Synology DSM.",
},
407: {
status: grpc.status.UNAUTHENTICATED,
message:
"IP has been blocked to the Synology DSM due to too many failed attempts.",
},
18990002: {
status: grpc.status.RESOURCE_EXHAUSTED,
message: "The synology volume is out of disk space.",
},
18990318: {
status: grpc.status.INVALID_ARGUMENT,
message:
"The requested lun type is incompatible with the Synology filesystem.",
},
18990538: {
status: grpc.status.ALREADY_EXISTS,
message: "A LUN with this name already exists.",
},
18990541: {
status: grpc.status.RESOURCE_EXHAUSTED,
message: "The maximum number of LUNS has been reached.",
},
18990542: {
status: grpc.status.RESOURCE_EXHAUSTED,
message: "The maximum number of iSCSI targets has been reached.",
},
18990708: {
status: grpc.status.INVALID_ARGUMENT,
message: "Bad target auth info.",
},
18990744: {
status: grpc.status.ALREADY_EXISTS,
message: "An iSCSI target with this name already exists.",
},
18990532: { status: grpc.status.NOT_FOUND, message: "No such snapshot." },
18990500: { status: grpc.status.INVALID_ARGUMENT, message: "Bad LUN type" },
18990543: {
status: grpc.status.RESOURCE_EXHAUSTED,
message: "Maximum number of snapshots reached.",
},
18990635: {
status: grpc.status.INVALID_ARGUMENT,
message: "Invalid ioPolicy.",
},
};
class SynologyError extends GrpcError {
constructor(code, httpCode = undefined) {
super(0, "");
this.synoCode = code;
this.httpCode = httpCode;
if (code > 0) {
const error = SYNO_ERRORS[code];
this.code = error && error.status ? error.status : grpc.status.UNKNOWN;
this.message =
error && error.message
? error.message
: `An unknown error occurred when executing a synology command (code = ${code}).`;
} else {
this.code = grpc.status.UNKNOWN;
this.message = `The synology webserver returned a status code ${httpCode}`;
}
}
}
class SynologyHttpClient {
constructor(options = {}) {
this.options = JSON.parse(JSON.stringify(options));
this.logger = console;
this.doLoginMutex = new Mutex();
this.apiSerializeMutex = new Mutex();
if (false) {
setInterval(() => {
console.log("WIPING OUT SYNOLOGY SID");
this.sid = null;
}, 5 * 1000);
}
}
getHttpAgent() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_agent`, () => {
return new http.Agent({
keepAlive: true,
maxSockets: Infinity,
rejectUnauthorized: !!!this.options.allowInsecure,
});
});
}
getHttpsAgent() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:https_agent`, () => {
return new https.Agent({
keepAlive: true,
maxSockets: Infinity,
rejectUnauthorized: !!!this.options.allowInsecure,
});
});
}
log_response(error, response, body, options) {
const cleansedBody = JSON.parse(stringify(body));
const cleansedOptions = JSON.parse(stringify(options));
// This function handles arrays and objects
function recursiveCleanse(obj) {
for (const k in obj) {
if (typeof obj[k] == "object" && obj[k] !== null) {
recursiveCleanse(obj[k]);
} else {
if (
[
"account",
"passwd",
"username",
"password",
"_sid",
"sid",
"Authorization",
"authorization",
"user",
"mutual_user",
"mutual_password",
].includes(k)
) {
obj[k] = "redacted";
}
}
}
}
recursiveCleanse(cleansedBody);
recursiveCleanse(cleansedOptions);
delete cleansedOptions.httpAgent;
delete cleansedOptions.httpsAgent;
this.logger.debug("SYNOLOGY HTTP REQUEST: " + stringify(cleansedOptions));
this.logger.debug("SYNOLOGY HTTP ERROR: " + error);
this.logger.debug(
"SYNOLOGY HTTP STATUS: " + _.get(response, "statusCode", "")
);
this.logger.debug(
"SYNOLOGY HTTP HEADERS: " + stringify(_.get(response, "headers", ""))
);
this.logger.debug("SYNOLOGY HTTP BODY: " + stringify(cleansedBody));
}
async do_request(method, path, data = {}, options = {}) {
const client = this;
const isAuth = data.api == "SYNO.API.Auth" && data.method == "login";
let sid;
let apiMutexRelease;
if (!isAuth) {
sid = await this.doLoginMutex.runExclusive(async () => {
return await this.login();
});
}
const invoke_options = options;
if (!isAuth) {
if (this.options.serialize) {
apiMutexRelease = await this.apiSerializeMutex.acquire();
}
}
return new Promise((resolve, reject) => {
if (!isAuth) {
data._sid = sid;
}
const options = {
method: method,
url: `${this.options.protocol}://${this.options.host}:${this.options.port}/webapi/${path}`,
headers: {
Accept: "application/json",
"User-Agent": USER_AGENT,
"Content-Type": invoke_options.use_form_encoded
? "application/x-www-form-urlencoded"
: "application/json",
},
responseType: "json",
httpAgent: this.getHttpAgent(),
httpsAgent: this.getHttpsAgent(),
timeout: 60 * 1000,
};
switch (method) {
case "GET":
let qsData = JSON.parse(JSON.stringify(data));
for (let p in qsData) {
if (Array.isArray(qsData[p]) || typeof qsData[p] == "boolean") {
qsData[p] = JSON.stringify(qsData[p]);
}
}
options.params = qsData;
break;
default:
if (invoke_options.use_form_encoded) {
            options.data = new URLSearchParams(data).toString();
} else {
options.data = data;
}
break;
}
try {
axios_request(options, function (error, response, body) {
client.log_response(...arguments, options);
if (error) {
reject(error);
}
if (
typeof response.body !== "object" &&
response.body !== null &&
response.headers["content-type"] &&
response.headers["content-type"].includes("application/json")
) {
response.body = JSON.parse(response.body);
}
if (response.statusCode > 299 || response.statusCode < 200) {
reject(new SynologyError(null, response.statusCode));
}
if (response.body.success === false) {
// remove invalid sid
if (response.body.error.code == 119 && sid == client.sid) {
client.sid = null;
}
reject(
new SynologyError(response.body.error.code, response.statusCode)
);
}
resolve(response);
});
} finally {
if (typeof apiMutexRelease == "function") {
apiMutexRelease();
}
}
});
}
async login() {
if (!this.sid) {
// See https://global.download.synology.com/download/Document/Software/DeveloperGuide/Os/DSM/All/enu/DSM_Login_Web_API_Guide_enu.pdf
const data = {
api: "SYNO.API.Auth",
version: "6",
method: "login",
account: this.options.username,
passwd: this.options.password,
session: this.options.session,
format: "sid",
};
let response = await this.do_request("GET", "auth.cgi", data);
this.sid = response.body.data.sid;
}
return this.sid;
}
async GetLuns() {
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
let response = await this.do_request("GET", "entry.cgi", lun_list);
return response.body.data.luns;
}
async GetLunUUIDByName(name) {
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
let response = await this.do_request("GET", "entry.cgi", lun_list);
let lun = response.body.data.luns.find((i) => {
return i.name == name;
});
if (lun) {
return lun.uuid;
}
}
async GetLunIDByName(name) {
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
let response = await this.do_request("GET", "entry.cgi", lun_list);
let lun = response.body.data.luns.find((i) => {
return i.name == name;
});
if (lun) {
return lun.lun_id;
}
}
async GetLunByID(lun_id) {
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
let response = await this.do_request("GET", "entry.cgi", lun_list);
let lun = response.body.data.luns.find((i) => {
return i.lun_id == lun_id;
});
if (lun) {
return lun;
}
}
async GetLunByName(name) {
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
let response = await this.do_request("GET", "entry.cgi", lun_list);
let lun = response.body.data.luns.find((i) => {
return i.name == name;
});
if (lun) {
return lun;
}
}
async GetSnapshots() {
let luns = await this.GetLuns();
let snapshots = [];
for (let lun of luns) {
const get_snapshot_info = {
api: "SYNO.Core.ISCSI.LUN",
method: "list_snapshot",
version: 1,
src_lun_uuid: JSON.stringify(lun.uuid),
};
let response = await this.do_request(
"GET",
"entry.cgi",
get_snapshot_info
);
snapshots = snapshots.concat(response.body.data.snapshots);
}
return snapshots;
}
async GetSnapshotByLunUUIDAndName(lun_uuid, name) {
const get_snapshot_info = {
api: "SYNO.Core.ISCSI.LUN",
method: "list_snapshot",
version: 1,
src_lun_uuid: JSON.stringify(lun_uuid),
};
let response = await this.do_request("GET", "entry.cgi", get_snapshot_info);
if (response.body.data.snapshots) {
let snapshot = response.body.data.snapshots.find((i) => {
return i.description == name;
});
if (snapshot) {
return snapshot;
}
}
}
async GetSnapshotByLunUUIDAndSnapshotUUID(lun_uuid, snapshot_uuid) {
const get_snapshot_info = {
api: "SYNO.Core.ISCSI.LUN",
method: "list_snapshot",
version: 1,
src_lun_uuid: JSON.stringify(lun_uuid),
};
let response = await this.do_request("GET", "entry.cgi", get_snapshot_info);
if (response.body.data.snapshots) {
let snapshot = response.body.data.snapshots.find((i) => {
return i.uuid == snapshot_uuid;
});
if (snapshot) {
return snapshot;
}
}
}
async DeleteSnapshot(snapshot_uuid) {
const iscsi_snapshot_delete = {
api: "SYNO.Core.ISCSI.LUN",
method: "delete_snapshot",
version: 1,
snapshot_uuid: JSON.stringify(snapshot_uuid), // snapshot_id
deleted_by: "democratic_csi", // ?
};
let response = await this.do_request(
"GET",
"entry.cgi",
iscsi_snapshot_delete
);
// return?
}
async GetVolumeInfo(volume_path) {
let data = {
api: "SYNO.Core.Storage.Volume",
method: "get",
version: "1",
//volume_path: "/volume1",
volume_path,
};
return await this.do_request("GET", "entry.cgi", data);
}
async GetTargetByTargetID(target_id) {
let targets = await this.ListTargets();
let target = targets.find((i) => {
return i.target_id == target_id;
});
return target;
}
async GetTargetByIQN(iqn) {
let targets = await this.ListTargets();
let target = targets.find((i) => {
return i.iqn == iqn;
});
return target;
}
async ListTargets() {
const iscsi_target_list = {
api: "SYNO.Core.ISCSI.Target",
version: "1",
path: "entry.cgi",
method: "list",
additional: '["mapped_lun", "status", "acls", "connected_sessions"]',
};
let response = await this.do_request("GET", "entry.cgi", iscsi_target_list);
return response.body.data.targets;
}
async CreateLun(data = {}) {
let response;
let iscsi_lun_create = Object.assign({}, data, {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "create",
});
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
try {
response = await this.do_request("GET", "entry.cgi", iscsi_lun_create);
return response.body.data.uuid;
} catch (err) {
if (err.synoCode === 18990538) {
response = await this.do_request("GET", "entry.cgi", lun_list);
let lun = response.body.data.luns.find((i) => {
return i.name == iscsi_lun_create.name;
});
return lun.uuid;
} else {
throw err;
}
}
}
async MapLun(data = {}) {
// this is mapping from the perspective of the lun
let iscsi_target_map = Object.assign({}, data, {
api: "SYNO.Core.ISCSI.LUN",
method: "map_target",
version: "1",
});
iscsi_target_map.uuid = JSON.stringify(iscsi_target_map.uuid);
iscsi_target_map.target_ids = JSON.stringify(iscsi_target_map.target_ids);
// this is mapping from the perspective of the target
/*
iscsi_target_map = Object.assign(data, {
api: "SYNO.Core.ISCSI.Target",
method: "map_lun",
version: "1",
});
iscsi_target_map.lun_uuids = JSON.stringify(iscsi_target_map.lun_uuids);
*/
await this.do_request("GET", "entry.cgi", iscsi_target_map);
}
async DeleteLun(uuid) {
uuid = uuid || "";
let iscsi_lun_delete = {
api: "SYNO.Core.ISCSI.LUN",
method: "delete",
version: 1,
//uuid: uuid,
uuid: JSON.stringify(""),
uuids: JSON.stringify([uuid]),
//is_soft_feas_ignored: false,
is_soft_feas_ignored: true,
//feasibility_precheck: true,
};
await this.do_request("GET", "entry.cgi", iscsi_lun_delete);
}
async DeleteAllLuns() {
const lun_list = {
api: "SYNO.Core.ISCSI.LUN",
version: "1",
method: "list",
};
let response = await this.do_request("GET", "entry.cgi", lun_list);
for (let lun of response.body.data.luns) {
await this.DeleteLun(lun.uuid);
}
}
async CreateSnapshot(data) {
data = Object.assign({}, data, {
api: "SYNO.Core.ISCSI.LUN",
method: "take_snapshot",
version: 1,
});
data.src_lun_uuid = JSON.stringify(data.src_lun_uuid);
return await this.do_request("GET", "entry.cgi", data);
}
async CreateTarget(data = {}) {
let iscsi_target_create = Object.assign({}, data, {
api: "SYNO.Core.ISCSI.Target",
version: "1",
method: "create",
});
let response;
try {
response = await this.do_request("GET", "entry.cgi", iscsi_target_create);
return response.body.data.target_id;
} catch (err) {
if (err.synoCode === 18990744) {
//do lookup
const iscsi_target_list = {
api: "SYNO.Core.ISCSI.Target",
version: "1",
path: "entry.cgi",
method: "list",
additional: '["mapped_lun", "status", "acls", "connected_sessions"]',
};
response = await this.do_request("GET", "entry.cgi", iscsi_target_list);
let target = response.body.data.targets.find((i) => {
return i.iqn == iscsi_target_create.iqn;
});
if (target) {
return target.target_id;
} else {
throw err;
}
} else {
throw err;
}
}
}
async DeleteTarget(target_id) {
const iscsi_target_delete = {
api: "SYNO.Core.ISCSI.Target",
method: "delete",
version: "1",
path: "entry.cgi",
};
try {
await this.do_request(
"GET",
"entry.cgi",
Object.assign({}, iscsi_target_delete, {
target_id: JSON.stringify(String(target_id || "")),
})
);
} catch (err) {
/**
       * 18990710 = non-existent
*/
//if (err.synoCode !== 18990710) {
throw err;
//}
}
}
async ExpandISCSILun(uuid, size) {
const iscsi_lun_extend = {
api: "SYNO.Core.ISCSI.LUN",
method: "set",
version: 1,
};
return await this.do_request(
"GET",
"entry.cgi",
Object.assign({}, iscsi_lun_extend, {
uuid: JSON.stringify(uuid),
new_size: size,
})
);
}
async CreateClonedVolume(
src_lun_uuid,
dst_lun_name,
dst_location,
description
) {
const create_cloned_volume = {
api: "SYNO.Core.ISCSI.LUN",
version: 1,
method: "clone",
src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid
dst_lun_name: dst_lun_name, // dst lun name
dst_location: dst_location,
is_same_pool: true, // always true? string?
clone_type: "democratic-csi", // check
};
if (description) {
create_cloned_volume.description = description;
}
return await this.do_request("GET", "entry.cgi", create_cloned_volume);
}
async CreateVolumeFromSnapshot(
src_lun_uuid,
snapshot_uuid,
cloned_lun_name,
description
) {
const create_volume_from_snapshot = {
api: "SYNO.Core.ISCSI.LUN",
version: 1,
method: "clone_snapshot",
src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid, snapshot id?
      snapshot_uuid: JSON.stringify(snapshot_uuid), // snapshot uuid
cloned_lun_name: cloned_lun_name, // cloned lun name
clone_type: "democratic-csi", // check
};
if (description) {
create_volume_from_snapshot.description = description;
}
return await this.do_request(
"GET",
"entry.cgi",
create_volume_from_snapshot
);
}
}
module.exports.SynologyHttpClient = SynologyHttpClient;

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,241 +0,0 @@
const _ = require("lodash");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const LocalCliExecClient =
require("../../utils/zfs_local_exec_client").LocalCliClient;
const { Zetabyte } = require("../../utils/zfs");
const ZFS_ASSET_NAME_PROPERTY_NAME = "zfs_asset_name";
const NODE_TOPOLOGY_KEY_NAME = "org.democratic-csi.topology/node";
const __REGISTRY_NS__ = "ControllerZfsLocalDriver";
class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
constructor(ctx, options) {
const i_caps = _.get(
options,
"service.identity.capabilities.service",
false
);
super(...arguments);
if (!i_caps) {
this.ctx.logger.debug("setting zfs-local identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
"VOLUME_ACCESSIBILITY_CONSTRAINTS",
];
}
}
getExecClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return new LocalCliExecClient({
logger: this.ctx.logger,
});
});
}
async getZetabyte() {
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
const execClient = this.getExecClient();
const options = {};
options.executor = execClient;
options.idempotent = true;
/*
if (
this.options.zfs.hasOwnProperty("cli") &&
this.options.zfs.cli &&
this.options.zfs.cli.hasOwnProperty("paths")
) {
options.paths = this.options.zfs.cli.paths;
}
*/
// use env based paths to allow for custom wrapper scripts to chroot to the host
options.paths = {
zfs: "zfs",
zpool: "zpool",
sudo: "sudo",
chroot: "chroot",
};
options.sudo = _.get(this.options, "zfs.cli.sudoEnabled", false);
if (typeof this.setZetabyteCustomOptions === "function") {
await this.setZetabyteCustomOptions(options);
}
return new Zetabyte(options);
});
}
/**
* cannot make this a storage class parameter as storage class/etc context is *not* sent
* into various calls such as GetControllerCapabilities etc
*/
getDriverZfsResourceType() {
switch (this.options.driver) {
case "zfs-local-dataset":
return "filesystem";
case "zfs-local-zvol":
return "volume";
default:
throw new Error("unknown driver: " + this.ctx.args.driver);
}
}
getFSTypes() {
const driverZfsResourceType = this.getDriverZfsResourceType();
switch (driverZfsResourceType) {
case "filesystem":
return ["zfs"];
case "volume":
return GeneralUtils.default_supported_block_filesystems();
}
}
/**
   * Although it is counter-intuitive to advertise node-local volumes as RWX, we
   * do so here to provide an easy out-of-the-box experience, as users will by
   * default want to provision RWX volumes. The topology constraints will
   * implicitly enforce that only a single node can use the volume at a given
   * time.
*
* @returns Array
*/
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
const driverZfsResourceType = this.getDriverZfsResourceType();
switch (driverZfsResourceType) {
case "filesystem":
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
case "volume":
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
/**
* csi controller service
*
* should create any necessary share resources and return volume context
*
* @param {*} datasetName
*/
async createShare(call, datasetName) {
let volume_context = {};
switch (this.options.driver) {
case "zfs-local-dataset":
volume_context = {
node_attach_driver: "zfs-local",
[ZFS_ASSET_NAME_PROPERTY_NAME]: datasetName,
};
return volume_context;
case "zfs-local-zvol":
volume_context = {
node_attach_driver: "zfs-local",
[ZFS_ASSET_NAME_PROPERTY_NAME]: datasetName,
};
return volume_context;
default:
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: unknown driver ${this.options.driver}`
);
}
}
/**
* csi controller service
*
* @param {*} call
* @param {*} datasetName
* @returns
*/
async deleteShare(call, datasetName) {
return {};
}
/**
* csi controller service
*
* @param {*} call
* @param {*} datasetName
*/
async expandVolume(call, datasetName) {}
/**
* List of topologies associated with the *volume*
*
* @returns array
*/
async getAccessibleTopology() {
const response = await super.NodeGetInfo(...arguments);
return [
{
segments: {
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
},
},
];
}
/**
* Add node topologies
*
* @param {*} call
* @returns
*/
async NodeGetInfo(call) {
const response = await super.NodeGetInfo(...arguments);
response.accessible_topology = {
segments: {
[NODE_TOPOLOGY_KEY_NAME]: response.node_id,
},
};
return response;
}
}
module.exports.ControllerZfsLocalDriver = ControllerZfsLocalDriver;

File diff suppressed because it is too large

@@ -1,62 +1,14 @@
const { FreeNASSshDriver } = require("./freenas/ssh");
const { FreeNASApiDriver } = require("./freenas/api");
const {
ControllerLocalHostpathDriver,
} = require("./controller-local-hostpath");
const { ControllerZfsGenericDriver } = require("./controller-zfs-generic");
const { ControllerZfsLocalDriver } = require("./controller-zfs-local");
const {
ZfsLocalEphemeralInlineDriver,
} = require("./zfs-local-ephemeral-inline");
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
const { ControllerSmbClientDriver } = require("./controller-smb-client");
const { ControllerLustreClientDriver } = require("./controller-lustre-client");
const { ControllerObjectiveFSDriver } = require("./controller-objectivefs");
const { ControllerSynologyDriver } = require("./controller-synology");
const { NodeManualDriver } = require("./node-manual");
function factory(ctx, options) {
switch (options.driver) {
case "freenas-nfs":
case "freenas-smb":
case "freenas-iscsi":
// TrueNAS SCALE 25.04+ drivers using JSON-RPC over WebSocket
case "truenas-nfs":
case "truenas-smb":
case "truenas-iscsi":
return new FreeNASSshDriver(ctx, options);
case "freenas-api-iscsi":
case "freenas-api-nfs":
case "freenas-api-smb":
case "truenas-nvmeof":
return new FreeNASApiDriver(ctx, options);
case "synology-nfs":
case "synology-smb":
case "synology-iscsi":
return new ControllerSynologyDriver(ctx, options);
case "zfs-generic-nfs":
case "zfs-generic-smb":
case "zfs-generic-iscsi":
case "zfs-generic-nvmeof":
return new ControllerZfsGenericDriver(ctx, options);
case "zfs-local-dataset":
case "zfs-local-zvol":
return new ControllerZfsLocalDriver(ctx, options);
case "zfs-local-ephemeral-inline":
return new ZfsLocalEphemeralInlineDriver(ctx, options);
case "smb-client":
return new ControllerSmbClientDriver(ctx, options);
case "nfs-client":
return new ControllerNfsClientDriver(ctx, options);
case "local-hostpath":
return new ControllerLocalHostpathDriver(ctx, options);
case "lustre-client":
return new ControllerLustreClientDriver(ctx, options);
case "objectivefs":
return new ControllerObjectiveFSDriver(ctx, options);
case "node-manual":
return new NodeManualDriver(ctx, options);
default:
throw new Error("invalid csi driver: " + options.driver);
throw new Error("invalid csi driver: " + options.driver + ". Only truenas-nfs, truenas-iscsi, and truenas-nvmeof are supported.");
}
}
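
For illustration, a minimal sketch of how the trimmed-down factory behaves; the ctx and option values below are hypothetical placeholders, not part of this change:

// hypothetical usage sketch
const driver = factory(ctx, {
  driver: "truenas-nfs",
  // connection and zfs options omitted for brevity
});

// any driver name not handled by the switch above now throws, e.g.
factory(ctx, { driver: "zfs-generic-nfs" });
// -> Error: invalid csi driver: zfs-generic-nfs. Only truenas-nfs, truenas-iscsi, and truenas-nvmeof are supported.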

File diff suppressed because it is too large

@@ -1,244 +1,348 @@
const _ = require("lodash");
const http = require("http");
const https = require("https");
const URI = require("uri-js");
const { axios_request, stringify } = require("../../../utils/general");
const WebSocket = require("ws");
const { stringify } = require("../../../utils/general");
const USER_AGENT = "democratic-csi-driver";
/**
* TrueNAS SCALE 25.04+ WebSocket JSON-RPC 2.0 Client
*
* This client implements the JSON-RPC 2.0 protocol over WebSocket
* for communication with TrueNAS SCALE 25.04 and later versions.
*
* References:
* - https://api.truenas.com/v25.04.2/jsonrpc.html
* - https://github.com/truenas/api_client
*/
class Client {
constructor(options = {}) {
this.options = JSON.parse(JSON.stringify(options));
this.logger = console;
// default to v1.0 for now
if (!this.options.apiVersion) {
this.options.apiVersion = 2;
}
this.ws = null;
this.authenticated = false;
this.messageId = 0;
this.pendingRequests = new Map();
this.reconnectAttempts = 0;
this.maxReconnectAttempts = 5;
this.reconnectDelay = 2000; // Start with 2 seconds
this.eventSubscriptions = new Map();
this.connectPromise = null;
}
getHttpAgent() {
if (!this.httpAgent) {
this.httpAgent = new http.Agent({
keepAlive: true,
maxSockets: Infinity,
rejectUnauthorized: !!!this.options.allowInsecure,
});
}
return this.httpAgent;
}
getHttpsAgent() {
if (!this.httpsAgent) {
this.httpsAgent = new https.Agent({
keepAlive: true,
maxSockets: Infinity,
rejectUnauthorized: !!!this.options.allowInsecure,
});
}
return this.httpsAgent;
}
getBaseURL() {
/**
* Get WebSocket URL for TrueNAS SCALE 25.04+
* Format: ws://host:port/api/current or wss://host:port/api/current
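   * Example (illustrative): { host: "truenas.example.com", port: 443 } -> wss://truenas.example.com:443/api/current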
*/
getWebSocketURL() {
const server = this.options;
if (!server.protocol) {
if (server.port) {
if (String(server.port).includes("80")) {
server.protocol = "http";
}
if (String(server.port).includes("443")) {
server.protocol = "https";
}
// Determine protocol
let protocol = server.protocol || "http";
if (server.port) {
if (String(server.port).includes("443")) {
protocol = "https";
} else if (String(server.port).includes("80")) {
protocol = "http";
}
}
if (!server.protocol) {
server.protocol = "http";
// Convert http/https to ws/wss
const wsProtocol = protocol === "https" ? "wss" : "ws";
// Build WebSocket URL - use /api/current for versioned JSON-RPC API
const port = server.port ? `:${server.port}` : "";
return `${wsProtocol}://${server.host}${port}/api/current`;
}
/**
* Connect to TrueNAS WebSocket API and authenticate
*/
async connect() {
// Return existing connection promise if already connecting
if (this.connectPromise) {
return this.connectPromise;
}
const options = {
scheme: server.protocol,
host: server.host,
port: server.port,
//userinfo: server.username + ":" + server.password,
path: server.apiVersion == 1 ? "/api/v1.0" : "/api/v2.0",
};
return URI.serialize(options);
// Return immediately if already connected and authenticated
if (this.ws && this.ws.readyState === WebSocket.OPEN && this.authenticated) {
return Promise.resolve();
}
this.connectPromise = this._doConnect();
try {
await this.connectPromise;
} finally {
this.connectPromise = null;
}
}
setApiVersion(apiVersion) {
this.options.apiVersion = apiVersion;
}
async _doConnect() {
const url = this.getWebSocketURL();
this.logger.debug(`Connecting to TrueNAS WebSocket API: ${url}`);
getApiVersion() {
return this.options.apiVersion;
}
return new Promise((resolve, reject) => {
try {
const wsOptions = {
headers: {
"User-Agent": USER_AGENT,
},
};
getRequestCommonOptions() {
const client = this;
const options = {
headers: {
Accept: "application/json",
"User-Agent": USER_AGENT,
"Content-Type": "application/json",
},
responseType: "json",
httpAgent: this.getHttpAgent(),
httpsAgent: this.getHttpsAgent(),
timeout: 60 * 1000,
validateStatus: function (status) {
if (status >= 500) {
return false;
// Handle insecure TLS
if (this.options.allowInsecure) {
wsOptions.rejectUnauthorized = false;
}
return true;
},
this.ws = new WebSocket(url, wsOptions);
this.ws.on("open", async () => {
this.logger.debug("WebSocket connection established");
this.reconnectAttempts = 0;
try {
// Authenticate immediately after connection
await this._authenticate();
this.authenticated = true;
resolve();
} catch (error) {
this.logger.error("Authentication failed:", error);
reject(error);
}
});
this.ws.on("message", (data) => {
this._handleMessage(data);
});
this.ws.on("error", (error) => {
this.logger.error("WebSocket error:", error);
if (!this.authenticated) {
reject(error);
}
});
this.ws.on("close", (code, reason) => {
this.logger.warn(`WebSocket closed: ${code} - ${reason}`);
this.authenticated = false;
// Reject all pending requests
this.pendingRequests.forEach((pending) => {
pending.reject(new Error("WebSocket connection closed"));
});
this.pendingRequests.clear();
// Attempt to reconnect
this._scheduleReconnect();
});
} catch (error) {
reject(error);
}
});
}
/**
* Authenticate with TrueNAS API
*/
async _authenticate() {
if (this.options.apiKey) {
// Authenticate with API key
this.logger.debug("Authenticating with API key");
await this.call("auth.login_with_api_key", [this.options.apiKey]);
} else if (this.options.username && this.options.password) {
// Authenticate with username/password
this.logger.debug(`Authenticating with username: ${this.options.username}`);
await this.call("auth.login", [this.options.username, this.options.password]);
} else {
throw new Error("No authentication credentials provided (apiKey or username/password required)");
}
this.logger.debug("Authentication successful");
}
/**
* Schedule reconnection with exponential backoff
*/
_scheduleReconnect() {
if (this.reconnectAttempts >= this.maxReconnectAttempts) {
this.logger.error("Max reconnection attempts reached");
return;
}
this.reconnectAttempts++;
const delay = this.reconnectDelay * Math.pow(2, this.reconnectAttempts - 1);
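    // e.g. with reconnectDelay = 2000 and maxReconnectAttempts = 5, successive delays are 2s, 4s, 8s, 16s, 32s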
this.logger.info(`Scheduling reconnection attempt ${this.reconnectAttempts} in ${delay}ms`);
setTimeout(() => {
this.connect().catch((error) => {
this.logger.error("Reconnection failed:", error);
});
}, delay);
}
/**
* Handle incoming WebSocket messages
*/
_handleMessage(data) {
try {
const message = JSON.parse(data.toString());
this.logger.debug("Received message:", stringify(message));
// Handle JSON-RPC response
if (message.id !== undefined && this.pendingRequests.has(message.id)) {
const pending = this.pendingRequests.get(message.id);
this.pendingRequests.delete(message.id);
if (message.error) {
pending.reject(this._createError(message.error));
} else {
pending.resolve(message.result);
}
}
// Handle JSON-RPC notification (event)
else if (message.method && message.id === undefined) {
this._handleEvent(message.method, message.params);
}
} catch (error) {
this.logger.error("Error parsing WebSocket message:", error);
}
}
/**
* Handle event notifications
*/
_handleEvent(method, params) {
const handlers = this.eventSubscriptions.get(method) || [];
handlers.forEach((handler) => {
try {
handler(params);
} catch (error) {
this.logger.error(`Error in event handler for ${method}:`, error);
}
});
}
/**
* Create error object from JSON-RPC error
*/
_createError(error) {
const err = new Error(error.message || "TrueNAS API Error");
err.code = error.code;
err.data = error.data;
return err;
}
/**
* Make a JSON-RPC 2.0 method call
*
* @param {string} method - The API method to call (e.g., "pool.dataset.query")
* @param {array} params - Array of parameters for the method
* @param {object} options - Additional options (timeout, etc.)
* @returns {Promise} - Promise that resolves with the result
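   *
   * Example exchange (illustrative; the query filter below is a placeholder):
   *   --> {"jsonrpc":"2.0","id":1,"method":"pool.dataset.query","params":[[["name","=","tank/k8s"]]]}
   *   <-- {"jsonrpc":"2.0","id":1,"result":[ ...matching datasets... ]}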
*/
async call(method, params = [], options = {}) {
// Ensure connection is established
if (!this.authenticated) {
await this.connect();
}
const messageId = ++this.messageId;
const request = {
jsonrpc: "2.0",
id: messageId,
method: method,
params: params,
};
if (client.options.apiKey) {
options.headers.Authorization = `Bearer ${client.options.apiKey}`;
} else if (client.options.username && client.options.password) {
options.auth = {
username: client.options.username,
password: client.options.password,
};
}
return options;
}
log_repsonse(error, response, body, options) {
let prop;
let val;
prop = "auth.username";
val = _.get(options, prop, false);
if (val) {
_.set(options, prop, "redacted");
}
prop = "auth.password";
val = _.get(options, prop, false);
if (val) {
_.set(options, prop, "redacted");
}
prop = "headers.Authorization";
val = _.get(options, prop, false);
if (val) {
_.set(options, prop, "redacted");
}
delete options.httpAgent;
delete options.httpsAgent;
let duration = parseFloat(
Math.round((_.get(response, "duration", 0) + Number.EPSILON) * 100) /
100 /
1000
).toFixed(2);
this.logger.debug("FREENAS HTTP REQUEST DETAILS: " + stringify(options));
this.logger.debug("FREENAS HTTP REQUEST DURATION: " + duration + "s");
this.logger.debug("FREENAS HTTP ERROR: " + error);
this.logger.debug(
"FREENAS HTTP RESPONSE STATUS CODE: " + _.get(response, "statusCode", "")
);
this.logger.debug(
"FREENAS HTTP RESPONSE HEADERS: " +
stringify(_.get(response, "headers", ""))
);
this.logger.debug("FREENAS HTTP RESPONSE BODY: " + stringify(body));
}
async get(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
this.logger.debug(`Calling method ${method} with params:`, stringify(params));
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "GET";
options.url = this.getBaseURL() + endpoint;
options.params = data;
axios_request(options, function (err, res, body) {
client.log_repsonse(...arguments, options);
if (err) {
reject(err);
// Set timeout
const timeout = options.timeout || 60000;
const timeoutHandle = setTimeout(() => {
if (this.pendingRequests.has(messageId)) {
this.pendingRequests.delete(messageId);
reject(new Error(`Request timeout: ${method}`));
}
resolve(res);
}, timeout);
// Store pending request
this.pendingRequests.set(messageId, {
resolve: (result) => {
clearTimeout(timeoutHandle);
resolve(result);
},
reject: (error) => {
clearTimeout(timeoutHandle);
reject(error);
},
});
// Send request
try {
this.ws.send(JSON.stringify(request));
} catch (error) {
this.pendingRequests.delete(messageId);
clearTimeout(timeoutHandle);
reject(error);
}
});
}
async post(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
/**
* Subscribe to an event
*
* @param {string} event - Event name to subscribe to
* @param {function} handler - Handler function to call when event is received
*/
subscribe(event, handler) {
if (!this.eventSubscriptions.has(event)) {
this.eventSubscriptions.set(event, []);
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "POST";
options.url = this.getBaseURL() + endpoint;
options.data = data;
axios_request(options, function (err, res, body) {
client.log_repsonse(...arguments, options);
if (err) {
reject(err);
}
resolve(res);
});
});
this.eventSubscriptions.get(event).push(handler);
}
async put(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
/**
* Unsubscribe from an event
*/
unsubscribe(event, handler) {
if (!this.eventSubscriptions.has(event)) {
return;
}
const handlers = this.eventSubscriptions.get(event);
const index = handlers.indexOf(handler);
if (index > -1) {
handlers.splice(index, 1);
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "PUT";
options.url = this.getBaseURL() + endpoint;
options.data = data;
axios_request(options, function (err, res, body) {
client.log_repsonse(...arguments, options);
if (err) {
reject(err);
}
resolve(res);
});
});
}
async delete(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
/**
* Close the WebSocket connection
*/
async close() {
if (this.ws) {
this.ws.close();
this.ws = null;
this.authenticated = false;
}
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "DELETE";
options.url = this.getBaseURL() + endpoint;
options.data = data;
// Legacy compatibility methods (will be removed after full migration)
async get() {
throw new Error("HTTP GET is not supported. Use call() method with appropriate JSON-RPC method.");
}
axios_request(options, function (err, res, body) {
client.log_repsonse(...arguments, options);
if (err) {
reject(err);
}
async post() {
throw new Error("HTTP POST is not supported. Use call() method with appropriate JSON-RPC method.");
}
resolve(res);
});
});
async put() {
throw new Error("HTTP PUT is not supported. Use call() method with appropriate JSON-RPC method.");
}
async delete() {
throw new Error("HTTP DELETE is not supported. Use call() method with appropriate JSON-RPC method.");
}
}
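
A minimal usage sketch of the client above, assuming this module exports the Client class; the host, API key, and dataset name are placeholders:

const client = new Client({
  host: "truenas.example.com",
  port: 443,
  apiKey: "1-xxxxxxxx",
  allowInsecure: false,
});
await client.connect();
const datasets = await client.call("pool.dataset.query", [[["name", "=", "tank/k8s"]]]);
await client.close();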

File diff suppressed because it is too large

@@ -1,341 +0,0 @@
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const semver = require("semver");
/**
* Driver which only runs the node portion and is meant to be used entirely
* with manually created PVs
*/
class NodeManualDriver extends CsiBaseDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
//"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
//"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
//"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
//"LIST_VOLUMES",
//"GET_CAPACITY",
//"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
//"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc
.push
//"VOLUME_CONDITION",
//"GET_VOLUME"
();
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc
.push
//"SINGLE_NODE_MULTI_WRITER"
();
}
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
assertCapabilities(capabilities, node_attach_driver) {
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
let driverResourceType;
let fs_types = [];
let access_modes = [];
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
switch (node_attach_driver) {
case "nfs":
driverResourceType = "filesystem";
fs_types = ["nfs"];
break;
case "smb":
driverResourceType = "filesystem";
fs_types = ["cifs"];
break;
case "lustre":
driverResourceType = "filesystem";
fs_types = ["lustre"];
break;
case "objectivefs":
driverResourceType = "filesystem";
fs_types = ["objectivefs", "fuse.objectivefs"];
break;
case "oneclient":
driverResourceType = "filesystem";
fs_types = ["oneclient", "fuse.oneclient"];
break;
case "hostpath":
driverResourceType = "filesystem";
break;
case "iscsi":
case "nvmeof":
driverResourceType = "volume";
fs_types = ["btrfs", "ext3", "ext4", "ext4dev", "xfs"];
break;
case "zfs-local":
driverResourceType = "volume";
fs_types = ["btrfs", "ext3", "ext4", "ext4dev", "xfs", "zfs"];
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
        ];
        break;
default:
return {
valid: false,
message: `unknown node_attach_driver: ${node_attach_driver}`,
};
}
const valid = capabilities.every((capability) => {
switch (driverResourceType) {
case "filesystem":
if (access_modes.length == 0) {
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!fs_types.includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (!access_modes.includes(capability.access_mode.mode)) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
case "volume":
if (access_modes.length == 0) {
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
];
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
if (capability.access_type == "mount") {
if (
capability.mount.fs_type &&
!fs_types.includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
}
if (!access_modes.includes(capability.access_mode.mode)) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
}
});
return { valid, message };
}
/**
*
* @param {*} call
*/
async CreateVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async DeleteVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ControllerExpandVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async GetCapacity(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ListVolumes(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async CreateSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async DeleteSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
}
module.exports.NodeManualDriver = NodeManualDriver;


@@ -1,509 +0,0 @@
const fs = require("fs");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const { Filesystem } = require("../../utils/filesystem");
const semver = require("semver");
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
// zfs common properties
const MANAGED_PROPERTY_NAME = "democratic-csi:managed_resource";
const SUCCESS_PROPERTY_NAME = "democratic-csi:provision_success";
const VOLUME_CSI_NAME_PROPERTY_NAME = "democratic-csi:csi_volume_name";
const VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME =
"democratic-csi:volume_context_provisioner_driver";
const VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME =
"democratic-csi:volume_context_provisioner_instance_id";
const __REGISTRY_NS__ = "ZfsLocalEphemeralInlineDriver";
/**
* https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
* https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
*
* Sample calls:
* - https://gcsweb.k8s.io/gcs/kubernetes-jenkins/pr-logs/pull/92387/pull-kubernetes-e2e-gce/1280784994997899264/artifacts/_sig-storage_CSI_Volumes/_Driver_csi-hostpath_/_Testpattern_inline_ephemeral_CSI_volume_ephemeral/should_create_read_write_inline_ephemeral_volume/
* - https://storage.googleapis.com/kubernetes-jenkins/pr-logs/pull/92387/pull-kubernetes-e2e-gce/1280784994997899264/artifacts/_sig-storage_CSI_Volumes/_Driver_csi-hostpath_/_Testpattern_inline_ephemeral_CSI_volume_ephemeral/should_create_read-only_inline_ephemeral_volume/csi-hostpathplugin-0-hostpath.log
*
* inline drivers are assumed to be mount only (no block support)
 * purposely there is no native support for size constraints
*
* TODO: support creating zvols and formatting and mounting locally instead of using zfs dataset?
*
*/
class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
"UNKNOWN",
//"CONTROLLER_SERVICE"
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
"UNKNOWN",
//"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
//"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
//"LIST_VOLUMES",
//"GET_CAPACITY",
//"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
//"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc
.push
//"VOLUME_CONDITION",
//"GET_VOLUME"
();
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc
.push
//"SINGLE_NODE_MULTI_WRITER"
();
}
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
//"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
getSshClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:ssh_client`, () => {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection,
});
});
}
getZetabyte() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
let sshClient;
let executor;
if (this.options.sshConnection) {
sshClient = this.getSshClient();
executor = new ZfsSshProcessManager(sshClient);
}
return new Zetabyte({
executor,
idempotent: true,
chroot: this.options.zfs.chroot,
paths: {
zpool: "/usr/sbin/zpool",
zfs: "/usr/sbin/zfs",
},
});
});
}
getDatasetParentName() {
let datasetParentName = this.options.zfs.datasetParentName;
datasetParentName = datasetParentName.replace(/\/$/, "");
return datasetParentName;
}
getVolumeParentDatasetName() {
let datasetParentName = this.getDatasetParentName();
datasetParentName += "/v";
datasetParentName = datasetParentName.replace(/\/$/, "");
return datasetParentName;
}
assertCapabilities(capabilities) {
// hard code this for now
const driverZfsResourceType = "filesystem";
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
//[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
const valid = capabilities.every((capability) => {
switch (driverZfsResourceType) {
case "filesystem":
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!["zfs"].includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
capability.mount.mount_flags &&
capability.mount.mount_flags.length > 0
) {
message = `invalid mount_flags ${capability.mount.mount_flags}`;
return false;
}
if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
case "volume":
if (capability.access_type == "mount") {
if (
capability.mount.fs_type &&
!["btrfs", "ext3", "ext4", "ext4dev", "xfs"].includes(
capability.mount.fs_type
)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
}
if (
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
}
});
return { valid, message };
}
/**
* This should create a dataset with appropriate volume properties, ensuring
* the mountpoint is the target_path
*
* Any volume_context attributes starting with property.<name> will be set as zfs properties
*
* {
"target_path": "/var/lib/kubelet/pods/f8b237db-19e8-44ae-b1d2-740c9aeea702/volumes/kubernetes.io~csi/my-volume-0/mount",
"volume_capability": {
"AccessType": {
"Mount": {}
},
"access_mode": {
"mode": 1
}
},
"volume_context": {
"csi.storage.k8s.io/ephemeral": "true",
"csi.storage.k8s.io/pod.name": "inline-volume-tester-2ptb7",
"csi.storage.k8s.io/pod.namespace": "ephemeral-468",
"csi.storage.k8s.io/pod.uid": "f8b237db-19e8-44ae-b1d2-740c9aeea702",
"csi.storage.k8s.io/serviceAccount.name": "default",
"foo": "bar"
},
"volume_id": "csi-8228252978a824126924de00126e6aec7c989a48a39d577bd3ab718647df5555"
}
*
* @param {*} call
*/
async NodePublishVolume(call) {
const driver = this;
const zb = this.getZetabyte();
const volume_id = call.request.volume_id;
const staging_target_path = call.request.staging_target_path || "";
const target_path = call.request.target_path;
const capability = call.request.volume_capability;
const access_type = capability.access_type || "mount";
const readonly = call.request.readonly;
const volume_context = call.request.volume_context;
let datasetParentName = this.getVolumeParentDatasetName();
let name = volume_id;
if (!datasetParentName) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing datasetParentName`
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
if (!target_path) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`target_path is required`
);
}
if (capability) {
const result = this.assertCapabilities([capability]);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
}
const datasetName = datasetParentName + "/" + name;
// TODO: support arbitrary values from config
// TODO: support arbitrary props from volume_context
let volumeProperties = {};
// set user-supplied properties
    // these come from volume_context keys starting with property.<foo>
const base_key = "property.";
const prefixLength = `${base_key}`.length;
Object.keys(volume_context).forEach(function (key) {
if (key.startsWith(base_key)) {
let normalizedKey = key.slice(prefixLength);
volumeProperties[normalizedKey] = volume_context[key];
}
});
// set standard properties
volumeProperties[VOLUME_CSI_NAME_PROPERTY_NAME] = name;
volumeProperties[MANAGED_PROPERTY_NAME] = "true";
volumeProperties[VOLUME_CONTEXT_PROVISIONER_DRIVER_PROPERTY_NAME] =
driver.options.driver;
if (driver.options.instance_id) {
volumeProperties[VOLUME_CONTEXT_PROVISIONER_INSTANCE_ID_PROPERTY_NAME] =
driver.options.instance_id;
}
volumeProperties[SUCCESS_PROPERTY_NAME] = "true";
// NOTE: setting mountpoint will automatically create the full path as necessary so no need for mkdir etc
volumeProperties["mountpoint"] = target_path;
    // does not really make sense for ephemeral volumes... but we'll put it here just in case
if (readonly) {
volumeProperties["readonly"] = "on";
}
// set driver config properties
if (this.options.zfs.properties) {
Object.keys(driver.options.zfs.properties).forEach(function (key) {
const value = driver.options.zfs.properties[key]["value"];
const allowOverride =
"allowOverride" in driver.options.zfs.properties[key]
? driver.options.zfs.properties[key]["allowOverride"]
: true;
if (!allowOverride || !(key in volumeProperties)) {
volumeProperties[key] = value;
}
});
}
    // TODO: catch out of space errors and return specific grpc message?
await zb.zfs.create(datasetName, {
parents: true,
properties: volumeProperties,
});
return {};
}
/**
* This should destroy the dataset and remove target_path as appropriate
*
*{
"target_path": "/var/lib/kubelet/pods/f8b237db-19e8-44ae-b1d2-740c9aeea702/volumes/kubernetes.io~csi/my-volume-0/mount",
"volume_id": "csi-8228252978a824126924de00126e6aec7c989a48a39d577bd3ab718647df5555"
}
*
* @param {*} call
*/
async NodeUnpublishVolume(call) {
const zb = this.getZetabyte();
const filesystem = new Filesystem();
let result;
const volume_id = call.request.volume_id;
const target_path = call.request.target_path;
let datasetParentName = this.getVolumeParentDatasetName();
let name = volume_id;
if (!datasetParentName) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing datasetParentName`
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
if (!target_path) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`target_path is required`
);
}
const datasetName = datasetParentName + "/" + name;
// NOTE: -f does NOT allow deletes if dependent filesets exist
// NOTE: -R will recursively delete items + dependent filesets
// delete dataset
try {
await zb.zfs.destroy(datasetName, { recurse: true, force: true });
} catch (err) {
if (err.toString().includes("filesystem has dependent clones")) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
"filesystem has dependent clones"
);
}
throw err;
}
// cleanup publish directory
result = await filesystem.pathExists(target_path);
if (result) {
if (fs.lstatSync(target_path).isDirectory()) {
result = await filesystem.rmdir(target_path);
} else {
result = await filesystem.rm([target_path]);
}
}
return {};
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
const driver = this;
const zb = this.getZetabyte();
let datasetParentName = this.getVolumeParentDatasetName();
if (!datasetParentName) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`invalid configuration: missing datasetParentName`
);
}
if (call.request.volume_capabilities) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { available_capacity: 0 };
}
}
const datasetName = datasetParentName;
let properties;
properties = await zb.zfs.get(datasetName, ["avail"]);
properties = properties[datasetName];
return { available_capacity: properties.available.value };
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ZfsLocalEphemeralInlineDriver = ZfsLocalEphemeralInlineDriver;


@@ -1,248 +0,0 @@
const Client = require("ssh2").Client;
const { E_CANCELED, Mutex } = require("async-mutex");
const GeneralUtils = require("./general");
class SshClient {
constructor(options = {}) {
const client = this;
this.options = options;
this.options.connection = this.options.connection || {};
if (this.options.logger) {
this.logger = this.options.logger;
} else {
this.logger = console;
console.silly = console.debug;
}
if (!this.options.connection.hasOwnProperty("keepaliveInterval")) {
this.options.connection.keepaliveInterval = 10000;
}
if (this.options.connection.debug === true) {
this.options.connection.debug = function (msg) {
client.debug(msg);
};
}
this.conn_mutex = new Mutex();
this.conn_state;
this.conn_err;
this.ready_event_count = 0;
this.error_event_count = 0;
this.conn = new Client();
// invoked before close
this.conn.on("end", () => {
this.conn_state = "ended";
this.debug("Client :: end");
});
// invoked after end
this.conn.on("close", () => {
this.conn_state = "closed";
this.debug("Client :: close");
});
this.conn.on("error", (err) => {
this.conn_state = "error";
this.conn_err = err;
this.error_event_count++;
this.debug("Client :: error");
});
this.conn.on("ready", () => {
this.conn_state = "ready";
this.ready_event_count++;
this.debug("Client :: ready");
});
}
/**
* Build a command line from the name and given args
* TODO: escape the arguments
*
* @param {*} name
* @param {*} args
*/
buildCommand(name, args = []) {
args.unshift(name);
return args.join(" ");
}
debug() {
this.logger.silly(...arguments);
}
async _connect() {
const start_ready_event_count = this.ready_event_count;
const start_error_event_count = this.error_event_count;
try {
await this.conn_mutex.runExclusive(async () => {
this.conn.connect(this.options.connection);
do {
if (start_error_event_count != this.error_event_count) {
throw this.conn_err;
}
if (start_ready_event_count != this.ready_event_count) {
break;
}
await GeneralUtils.sleep(100);
} while (true);
});
} catch (err) {
if (err === E_CANCELED) {
return;
}
throw err;
}
}
async connect() {
if (this.conn_state == "ready") {
return;
}
return this._connect();
}
async exec(command, options = {}, stream_proxy = null) {
// default is to reuse
if (process.env.SSH_REUSE_CONNECTION == "0") {
return this._nexec(...arguments);
} else {
return this._rexec(...arguments);
}
}
async _rexec(command, options = {}, stream_proxy = null) {
const client = this;
const conn = this.conn;
return new Promise(async (resolve, reject) => {
do {
try {
await this.connect();
conn.exec(command, options, function (err, stream) {
if (err) {
reject(err);
return;
}
let stderr;
let stdout;
if (stream_proxy) {
stream_proxy.on("kill", (signal) => {
stream.destroy();
});
}
stream
.on("close", function (code, signal) {
client.debug(
"Stream :: close :: code: " + code + ", signal: " + signal
);
if (stream_proxy) {
stream_proxy.emit("close", ...arguments);
}
resolve({ stderr, stdout, code, signal });
//conn.end();
})
.on("data", function (data) {
client.debug("STDOUT: " + data);
if (stream_proxy) {
stream_proxy.stdout.emit("data", ...arguments);
}
if (stdout == undefined) {
stdout = "";
}
stdout = stdout.concat(data);
})
.stderr.on("data", function (data) {
client.debug("STDERR: " + data);
if (stream_proxy) {
stream_proxy.stderr.emit("data", ...arguments);
}
if (stderr == undefined) {
stderr = "";
}
stderr = stderr.concat(data);
});
});
break;
} catch (err) {
if (err.message && !err.message.includes("Not connected")) {
throw err;
}
}
await GeneralUtils.sleep(1000);
} while (true);
});
}
async _nexec(command, options = {}, stream_proxy = null) {
const client = this;
return new Promise((resolve, reject) => {
var conn = new Client();
conn
.on("error", function (err) {
client.debug("Client :: error");
reject(err);
})
.on("ready", function () {
client.debug("Client :: ready");
//options.pty = true;
//options.env = {
// TERM: "",
//};
conn.exec(command, options, function (err, stream) {
if (err) {
reject(err);
return;
}
let stderr;
let stdout;
stream
.on("close", function (code, signal) {
client.debug(
"Stream :: close :: code: " + code + ", signal: " + signal
);
if (stream_proxy) {
stream_proxy.emit("close", ...arguments);
}
resolve({ stderr, stdout, code, signal });
conn.end();
})
.on("data", function (data) {
client.debug("STDOUT: " + data);
if (stream_proxy) {
stream_proxy.stdout.emit("data", ...arguments);
}
if (stdout == undefined) {
stdout = "";
}
stdout = stdout.concat(data);
})
.stderr.on("data", function (data) {
client.debug("STDERR: " + data);
if (stream_proxy) {
stream_proxy.stderr.emit("data", ...arguments);
}
if (stderr == undefined) {
stderr = "";
}
stderr = stderr.concat(data);
});
});
})
.connect(client.options.connection);
if (stream_proxy) {
stream_proxy.on("kill", (signal) => {
conn.end();
});
}
});
}
}
module.exports.SshClient = SshClient;