Merge branch 'democratic-csi:master' into detachedSnapshotsDatasetParentName

commit d2b9068a23
@@ -1,3 +1,5 @@
+# https://www.truenas.com/software-status/
+
 name: CI

 on:
@@ -13,14 +15,14 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Cancel Previous Runs
-       uses: styfle/cancel-workflow-action@0.6.0
+       uses: styfle/cancel-workflow-action@0.11.0
        with:
          access_token: ${{ github.token }}

  build-npm-linux-amd64:
    runs-on: ubuntu-20.04
    steps:
-     - uses: actions/checkout@v2
+     - uses: actions/checkout@v3
      - uses: actions/setup-node@v3
        with:
          node-version: 16
@@ -29,7 +31,7 @@ jobs:
        run: |
          ci/bin/build.sh
      - name: upload build
-       uses: actions/upload-artifact@v2
+       uses: actions/upload-artifact@v3
        with:
          name: node-modules-linux-amd64
          path: node_modules-linux-amd64.tar.gz
@@ -38,7 +40,7 @@ jobs:
  build-npm-windows-amd64:
    runs-on: windows-2022
    steps:
-     - uses: actions/checkout@v2
+     - uses: actions/checkout@v3
      - uses: actions/setup-node@v3
        with:
          node-version: 16
@@ -47,7 +49,7 @@ jobs:
        run: |
          ci\bin\build.ps1
      - name: upload build
-       uses: actions/upload-artifact@v2
+       uses: actions/upload-artifact@v3
        with:
          name: node-modules-windows-amd64
          path: node_modules-windows-amd64.tar.gz
@@ -67,8 +69,8 @@ jobs:
      - X64
      - csi-sanity-synology
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -97,8 +99,8 @@ jobs:
      - X64
      - csi-sanity-synology
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -113,39 +115,6 @@ jobs:
          SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
          SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}

-  # api-based drivers
-  csi-sanity-truenas-scale-22_02:
-    needs:
-      - build-npm-linux-amd64
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          - truenas/scale/22.02/scale-iscsi.yaml
-          - truenas/scale/22.02/scale-nfs.yaml
-          # 80 char limit
-          - truenas/scale/22.02/scale-smb.yaml
-    runs-on:
-      - self-hosted
-      - Linux
-      - X64
-      - csi-sanity-truenas
-      #- csi-sanity-zfs-generic
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
-        with:
-          name: node-modules-linux-amd64
-      - name: csi-sanity
-        run: |
-          # run tests
-          ci/bin/run.sh
-        env:
-          TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
-          TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_22_02_HOST }}
-          TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
-          TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
-
  csi-sanity-truenas-scale-22_12:
    needs:
      - build-npm-linux-amd64
@@ -164,8 +133,8 @@ jobs:
      #- csi-sanity-truenas
      - csi-sanity-zfs-generic
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -178,40 +147,6 @@ jobs:
          TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
          TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}

-  # ssh-based drivers
-  csi-sanity-truenas-core-12_0:
-    needs:
-      - build-npm-linux-amd64
-    strategy:
-      fail-fast: false
-      matrix:
-        config:
-          # 63 char limit
-          - truenas/core/12.0/core-iscsi.yaml
-          - truenas/core/12.0/core-nfs.yaml
-          # 80 char limit
-          - truenas/core/12.0/core-smb.yaml
-    runs-on:
-      - self-hosted
-      - Linux
-      - X64
-      #- csi-sanity-truenas
-      - csi-sanity-zfs-generic
-    steps:
-      - uses: actions/checkout@v2
-      - uses: actions/download-artifact@v2
-        with:
-          name: node-modules-linux-amd64
-      - name: csi-sanity
-        run: |
-          # run tests
-          ci/bin/run.sh
-        env:
-          TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
-          TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_CORE_12_0_HOST }}
-          TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
-          TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
-
  # ssh-based drivers
  csi-sanity-truenas-core-13_0:
    needs:
@@ -231,8 +166,8 @@ jobs:
      #- csi-sanity-truenas
      - csi-sanity-zfs-generic
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -256,14 +191,15 @@ jobs:
          - zfs-generic/iscsi.yaml
          - zfs-generic/nfs.yaml
          - zfs-generic/smb.yaml
+         - zfs-generic/nvmeof.yaml
    runs-on:
      - self-hosted
      - Linux
      - X64
      - csi-sanity-zfs-generic
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -292,8 +228,8 @@ jobs:
      - X64
      - csi-sanity-client
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -320,8 +256,8 @@ jobs:
      - X64
      - csi-sanity-client
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-windows-amd64
      - name: csi-sanity
@@ -350,8 +286,8 @@ jobs:
      - X64
      - csi-sanity-zfs-local
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-linux-amd64
      - name: csi-sanity
@@ -389,8 +325,8 @@ jobs:
      - X64
      - csi-sanity-local-hostpath
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: ${{ matrix.npmartifact }}
      - name: csi-sanity
@@ -413,8 +349,8 @@ jobs:
      - Windows
      - X64
    steps:
-     - uses: actions/checkout@v2
-     - uses: actions/download-artifact@v2
+     - uses: actions/checkout@v3
+     - uses: actions/download-artifact@v3
        with:
          name: node-modules-windows-amd64
      - name: csi-sanity
@@ -457,8 +393,7 @@ jobs:
      - determine-image-tag
      - csi-sanity-synology-dsm6
      - csi-sanity-synology-dsm7
-     - csi-sanity-truenas-scale-22_02
-     - csi-sanity-truenas-core-12_0
+     - csi-sanity-truenas-scale-22_12
      - csi-sanity-truenas-core-13_0
      - csi-sanity-zfs-generic
      - csi-sanity-client
@@ -468,7 +403,7 @@ jobs:
      - csi-sanity-windows-node
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/checkout@v2
+     - uses: actions/checkout@v3
      - name: docker build
        run: |
          export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
@@ -496,8 +431,7 @@ jobs:
    needs:
      - csi-sanity-synology-dsm6
      - csi-sanity-synology-dsm7
-     - csi-sanity-truenas-scale-22_02
-     - csi-sanity-truenas-core-12_0
+     - csi-sanity-truenas-scale-22_12
      - csi-sanity-truenas-core-13_0
      - csi-sanity-zfs-generic
      - csi-sanity-client
@@ -519,7 +453,7 @@ jobs:
        nano_base_tag: ltsc2022
        file: Dockerfile.Windows
    steps:
-     - uses: actions/checkout@v2
+     - uses: actions/checkout@v3
      - name: docker build
        shell: bash
        run: |
@@ -531,7 +465,7 @@ jobs:
          docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
          docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
      - name: upload image tar
-       uses: actions/upload-artifact@v2
+       uses: actions/upload-artifact@v3
        with:
          name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
          path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
@@ -546,7 +480,7 @@ jobs:
      - self-hosted
      - buildah
    steps:
-     - uses: actions/checkout@v2
+     - uses: actions/checkout@v3
      - uses: actions/download-artifact@v3
        with:
          name: democratic-csi-windows-ltsc2019.tar
CHANGELOG.md
@@ -1,3 +1,39 @@
+# v1.8.3
+
+Released 2023-04-02
+
+- fix invalid `access_mode` logic (see #287)
+
+# v1.8.2
+
+Released 2023-04-02
+
+- more comprehensive support to manually set `access_modes`
+- more intelligent handling of `access_modes` when `access_type=block`
+  - https://github.com/ceph/ceph-csi/blob/devel/examples/README.md#how-to-test-rbd-multi_node_multi_writer-block-feature
+  - others? allow this by default
+- remove older versions of TrueNAS from ci
+
+# v1.8.1
+
+Released 2023-02-25
+
+- minor fixes
+- updated `nvmeof` docs
+
+# v1.8.0
+
+Released 2023-02-23
+
+- `nvmeof` support
+
+# v1.7.7
+
+Released 2022-10-17
+
+- support `csi.access_modes` config value in all zfs-based drivers
+- bump deps
+
 # v1.7.6

 Released 2022-08-06
@@ -75,7 +75,7 @@ COPY --from=build /usr/local/lib/nodejs/bin/node /usr/local/bin/node
# netbase is required by rpcbind/rpcinfo to work properly
# /etc/{services,rpc} are required
RUN apt-get update && \
-    apt-get install -y netbase socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux && \
+    apt-get install -y netbase socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli && \
    rm -rf /var/lib/apt/lists/*

# controller requirements
README.md
@@ -1,5 +1,5 @@
 
 
 [](https://artifacthub.io/packages/search?repo=democratic-csi)

 # Introduction
@@ -24,6 +26,8 @@ have access to resizing, snapshots, clones, etc functionality.
 - `freenas-api-smb` experimental use with SCALE only (manages zfs datasets to share over smb)
 - `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu)
 - `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu)
 - `zfs-generic-smb` (works with any ZoL installation...ie: Ubuntu)
+- `zfs-generic-nvmeof` (works with any ZoL installation...ie: Ubuntu)
 - `zfs-local-ephemeral-inline` (provisions node-local zfs datasets)
 - `zfs-local-dataset` (provision node-local volume as dataset)
 - `zfs-local-zvol` (provision node-local volume as zvol)
@@ -36,7 +38,8 @@ have access to resizing, snapshots, clones, etc functionality.
   for all volumes)
 - `local-hostpath` (crudely provisions node-local directories)
 - `node-manual` (allows connecting to manually created smb, nfs, lustre,
-  oneclient, and iscsi volumes, see sample PVs in the `examples` directory)
+  oneclient, nvmeof, and iscsi volumes, see sample PVs in the `examples`
+  directory)
 - framework for developing `csi` drivers

 If you have any interest in providing a `csi` driver, simply open an issue to
@@ -67,21 +70,21 @@ You should install/configure the requirements for both nfs and iscsi.

 ### cifs

-```
-RHEL / CentOS
+```bash
+# RHEL / CentOS
 sudo yum install -y cifs-utils

-Ubuntu / Debian
+# Ubuntu / Debian
 sudo apt-get install -y cifs-utils
 ```

 ### nfs

-```
+```bash
+# RHEL / CentOS
 sudo yum install -y nfs-utils

-Ubuntu / Debian
+# Ubuntu / Debian
 sudo apt-get install -y nfs-common
 ```
@@ -96,7 +99,7 @@ If you are running Kubernetes with rancher/rke please see the following:

 #### RHEL / CentOS

-```
+```bash
 # Install the following system packages
 sudo yum install -y lsscsi iscsi-initiator-utils sg3_utils device-mapper-multipath
@@ -135,32 +138,40 @@ sudo systemctl enable open-iscsi.service
 sudo service open-iscsi start
 sudo systemctl status open-iscsi
 ```

 #### [Talos](https://www.talos.dev/)

 To use iscsi storage in a Talos kubernetes cluster, these steps are needed; they are similar to the ones explained in https://www.talos.dev/v1.1/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/#patching-the-jiva-installation

 ##### Patch nodes

 Since talos does not have iscsi support by default, the iscsi extension is needed.
 Create a `patch.yaml` file with

 ```yaml
 - op: add
   path: /machine/install/extensions
   value:
     - image: ghcr.io/siderolabs/iscsi-tools:v0.1.1
 ```

 and apply the patch across all of your nodes

 ```bash
 talosctl -e <endpoint ip/hostname> -n <node ip/hostname> patch mc -p @patch.yaml
 ```

 The extension will not activate until you "upgrade" the nodes, even if there is no update; use the latest version of the talos installer.
 VERIFY THE TALOS VERSION IN THIS COMMAND BEFORE RUNNING IT AND READ THE [OpenEBS Jiva](https://www.talos.dev/v1.1/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/#patching-the-jiva-installation).
 Upgrade all of the nodes in the cluster to get the extension

 ```bash
 talosctl -e <endpoint ip/hostname> -n <node ip/hostname> upgrade --image=ghcr.io/siderolabs/installer:v1.1.1
 ```

 in your `values.yaml` file make sure to enable these settings
-```yaml
+
+```yaml
 node:
   hostPID: true
 driver:
@@ -172,17 +183,47 @@ node:
   iscsiDirHostPath: /usr/local/etc/iscsi
   iscsiDirHostPathType: ""
 ```

 and continue your democratic-csi installation as usual with the other iscsi drivers.


 ### freenas-smb

 If using with Windows based machines you may need to enable guest access (even
 if you are connecting with credentials)

+#### Privileged Namespace
+
+democratic-csi requires privileged access to the nodes, so the namespace should allow for privileged pods. One way of doing it is via [namespace labels](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/).
+Add the following label to the democratic-csi installation namespace: `pod-security.kubernetes.io/enforce=privileged`
 ```
-Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters AllowInsecureGuestAuth -Value 1
-Restart-Service LanmanWorkstation -Force
+kubectl label --overwrite namespace democratic-csi pod-security.kubernetes.io/enforce=privileged
 ```

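The same label can also be applied declaratively. A minimal sketch, assuming the chart is installed into a namespace named `democratic-csi` (adjust the name to your own installation namespace):

```yaml
# hypothetical namespace manifest carrying the pod-security label
apiVersion: v1
kind: Namespace
metadata:
  name: democratic-csi
  labels:
    pod-security.kubernetes.io/enforce: privileged
```
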
+### nvmeof
+
+```bash
+# not required but likely helpful (tools are included in the democratic images
+# so not needed on the host)
+apt-get install -y nvme-cli
+
+# get the nvme fabric modules
+apt-get install linux-generic
+
+# ensure the nvmeof modules get loaded at boot
+cat <<EOF > /etc/modules-load.d/nvme.conf
+nvme
+nvme-tcp
+nvme-fc
+nvme-rdma
+EOF
+
+# load the modules immediately
+modprobe nvme
+modprobe nvme-tcp
+modprobe nvme-fc
+modprobe nvme-rdma
+
+# nvme has native multipath or can use DM multipath
+# democratic-csi will gracefully handle either configuration
+# RedHat recommends DM multipath (nvme_core.multipath=N)
+cat /sys/module/nvme_core/parameters/multipath
+
+# kernel arg to enable/disable native multipath
+nvme_core.multipath=N
+```

@@ -237,17 +278,43 @@ linux nodes as well (using the `ntfs3` driver) so volumes created can be
 utilized by nodes with either operating system (in the case of `cifs` by both
 simultaneously).

+If using any `-iscsi` driver be sure your iqns are always fully lower-case by
+default (https://github.com/PowerShell/PowerShell/issues/17306).
+
 Due to current limits in the kubernetes tooling it is not possible to use the
 `local-hostpath` driver but support is implemented in this project and will
 work as soon as kubernetes support is available.

-```
+```powershell
 # ensure all updates are installed

 # enable the container feature
 Enable-WindowsOptionalFeature -Online -FeatureName Containers -All

 # install a HostProcess compatible kubernetes

+# smb support
+# If using with Windows based machines you may need to enable guest access
+# (even if you are connecting with credentials)
+Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters AllowInsecureGuestAuth -Value 1
+Restart-Service LanmanWorkstation -Force
+
 # iscsi
 # enable iscsi service and mpio as appropriate
 Get-Service -Name MSiSCSI
 Set-Service -Name MSiSCSI -StartupType Automatic
 Start-Service -Name MSiSCSI
 Get-Service -Name MSiSCSI

 # mpio
 Get-WindowsFeature -Name 'Multipath-IO'
 Add-WindowsFeature -Name 'Multipath-IO'

 Enable-MSDSMAutomaticClaim -BusType "iSCSI"
 Disable-MSDSMAutomaticClaim -BusType "iSCSI"

 Get-MSDSMGlobalDefaultLoadBalancePolicy
 Set-MSDSMGlobalLoadBalancePolicy -Policy RR
 ```

 - https://kubernetes.io/blog/2021/08/16/windows-hostprocess-containers/
@@ -353,7 +420,7 @@ Issues to review:
 - https://jira.ixsystems.com/browse/NAS-108522
 - https://jira.ixsystems.com/browse/NAS-107219

-### ZoL (zfs-generic-nfs, zfs-generic-iscsi, zfs-generic-smb)
+### ZoL (zfs-generic-nfs, zfs-generic-iscsi, zfs-generic-smb, zfs-generic-nvmeof)

 Ensure ssh and zfs are installed on the nfs/iscsi server and that you have installed
 `targetcli`.
@@ -367,7 +434,7 @@ unnecessarily:
 - https://github.com/democratic-csi/democratic-csi/issues/151 (some notes on
   using delegated zfs permissions)

-```
+```bash
 ####### nfs
 yum install -y nfs-utils
 systemctl enable --now nfs-server.service
@@ -389,6 +456,77 @@ passwd smbroot (optional)

 # create smb user and set password
 smbpasswd -L -a smbroot

+####### nvmeof
+# ensure nvmeof target modules are loaded at startup
+cat <<EOF > /etc/modules-load.d/nvmet.conf
+nvmet
+nvmet-tcp
+nvmet-fc
+nvmet-rdma
+EOF
+
+# load the modules immediately
+modprobe nvmet
+modprobe nvmet-tcp
+modprobe nvmet-fc
+modprobe nvmet-rdma
+
+# install nvmetcli and systemd services
+git clone git://git.infradead.org/users/hch/nvmetcli.git
+cd nvmetcli
+
+## install globally
+python3 setup.py install --prefix=/usr
+pip install configshell_fb
+
+## install to root home dir
+python3 setup.py install --user
+pip install configshell_fb --user
+
+# prevent log files from filling up disk
+ln -sf /dev/null ~/.nvmetcli/log.txt
+ln -sf /dev/null ~/.nvmetcli/history.txt
+
+# install systemd unit and enable/start
+## optionally, to ensure the config file is loaded before we start
+## reading/writing to it, add an ExecStartPost= to the unit file
+##
+## ExecStartPost=/usr/bin/touch /var/run/nvmet-config-loaded
+##
+## in your driver config set nvmeof.shareStrategyNvmetCli.configIsImportedFilePath=/var/run/nvmet-config-loaded
+## which will prevent the driver from making any changes until the configured
+## file is present
+vi nvmet.service
+
+cp nvmet.service /etc/systemd/system/
+mkdir -p /etc/nvmet
+systemctl daemon-reload
+systemctl enable --now nvmet.service
+systemctl status nvmet.service
+
+# create the port(s) configuration manually
+echo "
+cd /
+ls
+" | nvmetcli
+
+# do this multiple times, altering as appropriate, if you have/want multipath
+# change the port to 2, 3, ... for each additional path
+# the below example creates a tcp port listening on all IPs on port 4420
+echo "
+cd /ports
+create 1
+cd 1
+set addr adrfam=ipv4 trtype=tcp traddr=0.0.0.0 trsvcid=4420
+
+saveconfig /etc/nvmet/config.json
+" | nvmetcli
+
+# if running TrueNAS SCALE you can skip the above and simply copy
+# contrib/scale-nvmet-start.sh to your machine and add it as a startup
+# script (type COMMAND, when POSTINIT)
+# and then create the port(s) as mentioned above
+```

 ### Synology (synology-iscsi)

@@ -397,7 +535,7 @@ Ensure iscsi manager has been installed and is generally setup/configured. DSM 6

 ## Helm Installation

-```
+```bash
 helm repo add democratic-csi https://democratic-csi.github.io/charts/
 helm repo update
 # helm v2
@@ -441,13 +579,14 @@ microk8s helm upgrade \

 - microk8s - `/var/snap/microk8s/common/var/lib/kubelet`
 - pivotal - `/var/vcap/data/kubelet`
+- k0s - `/var/lib/k0s/kubelet`

 ### openshift

 `democratic-csi` generally works fine with openshift. Some special parameters
 need to be set with helm (support added in chart version `0.6.1`):

-```
+```bash
 # for sure required
 --set node.rbac.openshift.privileged=true
 --set node.driver.localtimeHostPath=false
@@ -461,6 +600,11 @@ need to be set with helm (support added in chart version `0.6.1`):
 `democratic-csi` works with Nomad in a functioning but limited capacity. See the
 [Nomad docs](docs/nomad.md) for details.

+### Docker Swarm
+
+- https://github.com/moby/moby/blob/master/docs/cluster_volumes.md
+- https://github.com/olljanat/csi-plugins-for-docker-swarm
+
 ## Multiple Deployments

 You may install multiple deployments of each/any driver. It requires the
@@ -479,25 +623,14 @@ following:

 # Snapshot Support

-Install beta (v1.17+) CRDs (once per cluster):
-
-- https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd
-
-```
-kubectl apply -f snapshot.storage.k8s.io_volumesnapshotclasses.yaml
-kubectl apply -f snapshot.storage.k8s.io_volumesnapshotcontents.yaml
-kubectl apply -f snapshot.storage.k8s.io_volumesnapshots.yaml
-```
-
 Install snapshot controller (once per cluster):

-- https://github.com/kubernetes-csi/external-snapshotter/tree/master/deploy/kubernetes/snapshot-controller
+- https://github.com/democratic-csi/charts/tree/master/stable/snapshot-controller

-```
-# replace namespace references to your liking
-kubectl apply -f rbac-snapshot-controller.yaml
-kubectl apply -f setup-snapshot-controller.yaml
-```
+OR
+
+- https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd
+- https://github.com/kubernetes-csi/external-snapshotter/tree/master/deploy/kubernetes/snapshot-controller
+
+Install `democratic-csi` as usual with `volumeSnapshotClasses` defined as appropriate.
@@ -19,32 +19,31 @@ if (! $env:CSI_SANITY_FAILFAST) {
   $env:CSI_SANITY_FAILFAST = "false"
 }

-$failfast = ""
-
-if ($env:CSI_SANITY_FAILFAST -eq "true") {
-  $failfast = "-ginkgo.failFast"
-}
-
 Write-Output "launching csi-sanity"
 Write-Output "connecting to: ${endpoint}"
 Write-Output "failfast: ${env:CSI_SANITY_FAILFAST}"
 Write-Output "skip: ${env:CSI_SANITY_SKIP}"
 Write-Output "focus: ${env:CSI_SANITY_FOCUS}"
 Write-Output "csi.mountdir: ${env:CSI_SANITY_TEMP_DIR}\mnt"
 Write-Output "csi.stagingdir: ${env:CSI_SANITY_TEMP_DIR}\stage"

-$skip = '"' + ${env:CSI_SANITY_SKIP} + '"'
-$focus = '"' + ${env:CSI_SANITY_FOCUS} + '"'
+$exe = "csi-sanity.exe"
+$exeargs = @()
+$exeargs += "-csi.endpoint", "unix://${endpoint}"
+$exeargs += "-csi.mountdir", "${env:CSI_SANITY_TEMP_DIR}\mnt"
+$exeargs += "-csi.stagingdir", "${env:CSI_SANITY_TEMP_DIR}\stage"
+$exeargs += "-csi.testvolumeexpandsize", "2147483648"
+$exeargs += "-csi.testvolumesize", "1073741824"
+$exeargs += "-ginkgo.skip", "${env:CSI_SANITY_SKIP}"
+$exeargs += "-ginkgo.focus", "${env:CSI_SANITY_FOCUS}"

-csi-sanity.exe -"csi.endpoint" "unix://${endpoint}" `
-  $failfast `
-  -"csi.mountdir" "${env:CSI_SANITY_TEMP_DIR}\mnt" `
-  -"csi.stagingdir" "${env:CSI_SANITY_TEMP_DIR}\stage" `
-  -"csi.testvolumeexpandsize" 2147483648 `
-  -"csi.testvolumesize" 1073741824 `
-  -"ginkgo.skip" $skip `
-  -"ginkgo.focus" $focus
+if ($env:CSI_SANITY_FAILFAST -eq "true") {
+  $exeargs += "-ginkgo.fail-fast"
+}

 # does not work the same as linux for some reason
 # -"ginkgo.skip" "'" + ${env:CSI_SANITY_SKIP} + "'" `
+Write-Output "csi-sanity command: $exe $($exeargs -join ' ')"
+
+&$exe $exeargs

 if (-not $?) {
   $exit_code = $LASTEXITCODE
@@ -0,0 +1,30 @@
+driver: zfs-generic-nvmeof
+
+sshConnection:
+  host: ${SERVER_HOST}
+  port: 22
+  username: ${SERVER_USERNAME}
+  password: ${SERVER_PASSWORD}
+
+zfs:
+  datasetParentName: tank/ci/${CI_BUILD_KEY}/v
+  detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
+
+  zvolCompression:
+  zvolDedup:
+  zvolEnableReservation: false
+  zvolBlocksize:
+
+nvmeof:
+  transports:
+    - "tcp://${SERVER_HOST}:4420"
+  namePrefix: "csi-ci-${CI_BUILD_KEY}-"
+  nameSuffix: ""
+  shareStrategy: "nvmetCli"
+  shareStrategyNvmetCli:
+    basename: "nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664"
+    ports:
+      - "1"
+    subsystem:
+      attributes:
+        allow_any_host: 1
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# simple script to 'start' nvmet on TrueNAS SCALE
+#
+# to reinstall nvmetcli simply rm /usr/sbin/nvmetcli
+
+# debug
+#set -x
+
+# exit on non-zero
+set -e
+
+SCRIPTDIR="$(
+  cd -- "$(dirname "$0")" >/dev/null 2>&1
+  pwd -P
+)"
+cd "${SCRIPTDIR}"
+
+: "${NVMETCONFIG:="${SCRIPTDIR}/nvmet-config.json"}"
+
+export PATH=${HOME}/.local/bin:${PATH}
+
+modules=()
+modules+=("nvmet")
+modules+=("nvmet-fc")
+modules+=("nvmet-rdma")
+modules+=("nvmet-tcp")
+
+for module in "${modules[@]}"; do
+  modprobe "${module}"
+done
+
+which nvmetcli &>/dev/null || {
+  which pip &>/dev/null || {
+    wget -O get-pip.py https://bootstrap.pypa.io/get-pip.py
+    python get-pip.py --user
+    rm get-pip.py
+  }
+
+  if [[ ! -d nvmetcli ]]; then
+    git clone git://git.infradead.org/users/hch/nvmetcli.git
+  fi
+
+  cd nvmetcli
+
+  # install to root home dir
+  python3 setup.py install --user
+
+  # install to root home dir
+  pip install configshell_fb --user
+
+  # remove source
+  cd "${SCRIPTDIR}"
+  rm -rf nvmetcli
+}
+
+cd "${SCRIPTDIR}"
+nvmetcli restore "${NVMETCONFIG}"
+
+touch /var/run/nvmet-config-loaded
+chmod +r /var/run/nvmet-config-loaded
@ -11,6 +11,10 @@ job "democratic-csi-iscsi-node" {
|
|||
|
||||
env {
|
||||
CSI_NODE_ID = "${attr.unique.hostname}"
|
||||
|
||||
# if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
|
||||
# you can configure the fs detection system used with the following envvar:
|
||||
#FILESYSTEM_TYPE_DETECTION_STRATEGY = "blkid"
|
||||
}
|
||||
|
||||
config {
|
||||
|
|
@ -38,6 +42,15 @@ job "democratic-csi-iscsi-node" {
|
|||
source = "/"
|
||||
readonly=false
|
||||
}
|
||||
|
||||
# if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
|
||||
# you can try uncommenting the following additional mount block:
|
||||
#mount {
|
||||
# type = "bind"
|
||||
# target = "/run/udev"
|
||||
# source = "/run/udev"
|
||||
# readonly = true
|
||||
#}
|
||||
}
|
||||
|
||||
template {
|
||||
|
|
|
|||
|
|
@@ -1,6 +1,7 @@
 driver: lustre-client
 instance_id:
 lustre:
+  # <MGS NID>[:<MGS NID>]
   shareHost: server address
   shareBasePath: "/some/path"
   # shareHost:shareBasePath should be mounted at this location in the controller container
@@ -0,0 +1,26 @@
+---
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: nvmeof-manual
+spec:
+  capacity:
+    storage: 1Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  mountOptions: []
+  csi:
+    driver: org.democratic-csi.node-manual
+    readOnly: false
+    # can be ext4 or xfs
+    fsType: ext4
+    volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
+    volumeAttributes:
+      # rdma and fc are also available
+      transport: tcp://<ip:port>
+      #transports: <transport>,<transport>,...
+      nqn: <nqn>
+      nsid: <nsid>
+      node_attach_driver: "nvmeof"
+      provisioner_driver: node-manual
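For illustration, a minimal sketch of the same `volumeAttributes` with the placeholders filled in; the address, nqn, and nsid below are purely hypothetical values for a target created with nvmetcli as shown earlier:

```yaml
# hypothetical volumeAttributes for a manually created nvmeof target
volumeAttributes:
  transport: tcp://192.168.0.100:4420
  nqn: nqn.2003-01.org.linux-nvme:csi-manual-test
  nsid: "1"
  node_attach_driver: "nvmeof"
  provisioner_driver: node-manual
```
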
@@ -0,0 +1,102 @@
+driver: zfs-generic-nvmeof
+sshConnection:
+  host: server address
+  port: 22
+  username: root
+  # use either password or key
+  password: ""
+  privateKey: |
+    -----BEGIN RSA PRIVATE KEY-----
+    ...
+    -----END RSA PRIVATE KEY-----
+
+zfs:
+  # can be used to override defaults if necessary
+  # the example below is useful for TrueNAS 12
+  #cli:
+  #  sudoEnabled: true
+  #  paths:
+  #    zfs: /usr/local/sbin/zfs
+  #    zpool: /usr/local/sbin/zpool
+  #    sudo: /usr/local/bin/sudo
+  #    chroot: /usr/sbin/chroot
+
+  # can be used to set arbitrary values on the dataset/zvol
+  # can use handlebars templates with the parameters from the storage class/CO
+  #datasetProperties:
+  #  "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
+  #  "org.freenas:test": "{{ parameters.foo }}"
+  #  "org.freenas:test2": "some value"
+
+  datasetParentName: tank/k8s/test
+  # do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
+  # they may be siblings, but neither should be nested in the other
+  detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
+
+  # "" (inherit), lz4, gzip-9, etc
+  zvolCompression:
+  # "" (inherit), on, off, verify
+  zvolDedup:
+  zvolEnableReservation: false
+  # 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
+  zvolBlocksize:
+
+nvmeof:
+  # these are for the node/client aspect
+  transports:
+    - tcp://server:port
+    #- "tcp://127.0.0.1:4420?host-iface=eth0"
+    #- "tcp://[2001:123:456::1]:4420"
+    #- "rdma://127.0.0.1:4420"
+    #- "fc://[nn-0x203b00a098cbcac6:pn-0x203d00a098cbcac6]"
+
+  # MUST ensure uniqueness
+  # full iqn limit is 223 bytes, plan accordingly
+  # default is "{{ name }}"
+  #nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
+  namePrefix:
+  nameSuffix:
+
+  shareStrategy: "nvmetCli"
+  #shareStrategy: "spdkCli"
+
+  # https://documentation.suse.com/es-es/sles/15-SP1/html/SLES-all/cha-nvmeof.html
+  # https://www.linuxjournal.com/content/data-flash-part-iii-nvme-over-fabrics-using-tcp
+  # http://git.infradead.org/users/hch/nvmetcli.git
+  shareStrategyNvmetCli:
+    #sudoEnabled: true
+    #nvmetcliPath: nvmetcli
+    # prevent startup race conditions by ensuring the config on disk has been imported
+    # before we start messing with things
+    #configIsImportedFilePath: /var/run/nvmet-config-loaded
+    #configPath: /etc/nvmet/config.json
+    basename: "nqn.2003-01.org.linux-nvme"
+    # add more ports here as appropriate if you have multipath
+    ports:
+      - "1"
+    subsystem:
+      attributes:
+        allow_any_host: 1
+    # not supported yet in nvmetcli
+    #namespace:
+    #  attributes:
+    #    buffered_io: 1
+
+  shareStrategySpdkCli:
+    # spdkcli.py
+    #spdkcliPath: spdkcli
+    configPath: /etc/spdk/spdk.json
+    basename: "nqn.2003-01.org.linux-nvmeof"
+    bdev:
+      type: uring
+      #type: aio
+      attributes:
+        block_size: 512
+    subsystem:
+      attributes:
+        allow_any_host: "true"
+    listeners:
+      - trtype: tcp
+        traddr: server
+        trsvcid: port
+        adrfam: ipv4
(file diff suppressed because it is too large)
@@ -1,6 +1,6 @@
 {
   "name": "democratic-csi",
-  "version": "1.7.7",
+  "version": "1.8.3",
   "description": "kubernetes csi driver framework",
   "main": "bin/democratic-csi",
   "scripts": {
@@ -18,13 +18,13 @@
     "url": "https://github.com/democratic-csi/democratic-csi.git"
   },
   "dependencies": {
-    "@grpc/grpc-js": "^1.5.7",
+    "@grpc/grpc-js": "^1.8.4",
     "@grpc/proto-loader": "^0.7.0",
-    "@kubernetes/client-node": "^0.17.0",
+    "@kubernetes/client-node": "^0.18.0",
     "async-mutex": "^0.4.0",
     "axios": "^1.1.3",
     "bunyan": "^1.8.15",
-    "fs-extra": "^10.1.0",
+    "fs-extra": "^11.1.0",
     "handlebars": "^4.7.7",
     "js-yaml": "^4.0.0",
     "lodash": "^4.17.21",
@@ -104,6 +104,33 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
     }
   }

+  getAccessModes(capability) {
+    let access_modes = _.get(this.options, "csi.access_modes", null);
+    if (access_modes !== null) {
+      return access_modes;
+    }
+
+    access_modes = [
+      "UNKNOWN",
+      "SINGLE_NODE_WRITER",
+      "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
+      "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
+      "SINGLE_NODE_READER_ONLY",
+      "MULTI_NODE_READER_ONLY",
+      "MULTI_NODE_SINGLE_WRITER",
+      "MULTI_NODE_MULTI_WRITER",
+    ];
+
+    if (
+      capability.access_type == "block" &&
+      !access_modes.includes("MULTI_NODE_MULTI_WRITER")
+    ) {
+      access_modes.push("MULTI_NODE_MULTI_WRITER");
+    }
+
+    return access_modes;
+  }
+
   assertCapabilities(capabilities) {
     const driver = this;
     this.ctx.logger.verbose("validating capabilities: %j", capabilities);
@@ -126,16 +153,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
     }

     if (
-      ![
-        "UNKNOWN",
-        "SINGLE_NODE_WRITER",
-        "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
-        "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
-        "SINGLE_NODE_READER_ONLY",
-        "MULTI_NODE_READER_ONLY",
-        "MULTI_NODE_SINGLE_WRITER",
-        "MULTI_NODE_MULTI_WRITER",
-      ].includes(capability.access_mode.mode)
+      !this.getAccessModes(capability).includes(capability.access_mode.mode)
     ) {
       message = `invalid access_mode, ${capability.access_mode.mode}`;
       return false;
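The `csi.access_modes` override consulted here via `_.get(this.options, "csi.access_modes", null)` is read from the driver config file. A minimal sketch of such an override; the driver name and mode list are illustrative only:

```yaml
# hypothetical driver config excerpt restricting the advertised access modes
driver: local-hostpath
csi:
  access_modes:
    - UNKNOWN
    - SINGLE_NODE_WRITER
    - SINGLE_NODE_READER_ONLY
```
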
@@ -208,6 +208,49 @@ class ControllerSynologyDriver extends CsiBaseDriver {
     return location;
   }

+  getAccessModes(capability) {
+    let access_modes = _.get(this.options, "csi.access_modes", null);
+    if (access_modes !== null) {
+      return access_modes;
+    }
+
+    const driverResourceType = this.getDriverResourceType();
+    switch (driverResourceType) {
+      case "filesystem":
+        access_modes = [
+          "UNKNOWN",
+          "SINGLE_NODE_WRITER",
+          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
+          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
+          "SINGLE_NODE_READER_ONLY",
+          "MULTI_NODE_READER_ONLY",
+          "MULTI_NODE_SINGLE_WRITER",
+          "MULTI_NODE_MULTI_WRITER",
+        ];
+        break;
+      case "volume":
+        access_modes = [
+          "UNKNOWN",
+          "SINGLE_NODE_WRITER",
+          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
+          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
+          "SINGLE_NODE_READER_ONLY",
+          "MULTI_NODE_READER_ONLY",
+          "MULTI_NODE_SINGLE_WRITER",
+        ];
+        break;
+    }
+
+    if (
+      capability.access_type == "block" &&
+      !access_modes.includes("MULTI_NODE_MULTI_WRITER")
+    ) {
+      access_modes.push("MULTI_NODE_MULTI_WRITER");
+    }
+
+    return access_modes;
+  }
+
   assertCapabilities(capabilities) {
     const driverResourceType = this.getDriverResourceType();
     this.ctx.logger.verbose("validating capabilities: %j", capabilities);
@@ -233,16 +276,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
     }

     if (
-      ![
-        "UNKNOWN",
-        "SINGLE_NODE_WRITER",
-        "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
-        "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
-        "SINGLE_NODE_READER_ONLY",
-        "MULTI_NODE_READER_ONLY",
-        "MULTI_NODE_SINGLE_WRITER",
-        "MULTI_NODE_MULTI_WRITER",
-      ].includes(capability.access_mode.mode)
+      !this.getAccessModes(capability).includes(
+        capability.access_mode.mode
+      )
     ) {
       message = `invalid access_mode, ${capability.access_mode.mode}`;
       return false;
@@ -263,15 +299,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
     }

     if (
-      ![
-        "UNKNOWN",
-        "SINGLE_NODE_WRITER",
-        "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
-        "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
-        "SINGLE_NODE_READER_ONLY",
-        "MULTI_NODE_READER_ONLY",
-        "MULTI_NODE_SINGLE_WRITER",
-      ].includes(capability.access_mode.mode)
+      !this.getAccessModes(capability).includes(
+        capability.access_mode.mode
+      )
     ) {
       message = `invalid access_mode, ${capability.access_mode.mode}`;
       return false;
@ -3,20 +3,29 @@ const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
|||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
const registry = require("../../utils/registry");
|
||||
const SshClient = require("../../utils/ssh").SshClient;
|
||||
const LocalCliExecClient =
|
||||
require("../../utils/zfs_local_exec_client").LocalCliClient;
|
||||
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
||||
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
|
||||
const ISCSI_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:iscsi_assets_name";
|
||||
const NVMEOF_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:nvmeof_assets_name";
|
||||
const __REGISTRY_NS__ = "ControllerZfsGenericDriver";
|
||||
class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
||||
getExecClient() {
|
||||
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
||||
return new SshClient({
|
||||
logger: this.ctx.logger,
|
||||
connection: this.options.sshConnection,
|
||||
});
|
||||
if (this.options.sshConnection) {
|
||||
return new SshClient({
|
||||
logger: this.ctx.logger,
|
||||
connection: this.options.sshConnection,
|
||||
});
|
||||
} else {
|
||||
return new LocalCliExecClient({
|
||||
logger: this.ctx.logger,
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
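With this change the exec client is chosen from config: when `sshConnection` is present the driver shells out over ssh, otherwise it executes locally. A minimal sketch of a config relying on the local path; the values are illustrative, the key point being the absent `sshConnection` block:

```yaml
# hypothetical zfs-generic-iscsi config executing zfs commands locally
# (no sshConnection block, so LocalCliExecClient is used)
driver: zfs-generic-iscsi
zfs:
  datasetParentName: tank/k8s/v
  detachedSnapshotsDatasetParentName: tank/k8s/s
```
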
@@ -24,7 +33,11 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
     return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
       const execClient = this.getExecClient();
       const options = {};
-      options.executor = new ZfsSshProcessManager(execClient);
+      if (this.options.sshConnection) {
+        options.executor = new ZfsSshProcessManager(execClient);
+      } else {
+        options.executor = execClient;
+      }
       options.idempotent = true;

       if (
@@ -55,6 +68,7 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
       case "zfs-generic-smb":
         return "filesystem";
       case "zfs-generic-iscsi":
+      case "zfs-generic-nvmeof":
         return "volume";
       default:
         throw new Error("unknown driver: " + this.ctx.args.driver);
@@ -164,28 +178,28 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
         };
         return volume_context;

-      case "zfs-generic-iscsi":
+      case "zfs-generic-iscsi": {
         let basename;
-        let iscsiName;
+        let assetName;

         if (this.options.iscsi.nameTemplate) {
-          iscsiName = Handlebars.compile(this.options.iscsi.nameTemplate)({
+          assetName = Handlebars.compile(this.options.iscsi.nameTemplate)({
             name: call.request.name,
             parameters: call.request.parameters,
           });
         } else {
-          iscsiName = zb.helpers.extractLeafName(datasetName);
+          assetName = zb.helpers.extractLeafName(datasetName);
         }

         if (this.options.iscsi.namePrefix) {
-          iscsiName = this.options.iscsi.namePrefix + iscsiName;
+          assetName = this.options.iscsi.namePrefix + assetName;
         }

         if (this.options.iscsi.nameSuffix) {
-          iscsiName += this.options.iscsi.nameSuffix;
+          assetName += this.options.iscsi.nameSuffix;
         }

-        iscsiName = iscsiName.toLowerCase();
+        assetName = assetName.toLowerCase();

         let extentDiskName = "zvol/" + datasetName;
@@ -239,20 +253,20 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
               `
 # create target
 cd /iscsi
-create ${basename}:${iscsiName}
+create ${basename}:${assetName}

 # setup tpg
-cd /iscsi/${basename}:${iscsiName}/tpg1
+cd /iscsi/${basename}:${assetName}/tpg1
 ${setAttributesText}
 ${setAuthText}

 # create extent
 cd /backstores/block
-create ${iscsiName} /dev/${extentDiskName}
+create ${assetName} /dev/${extentDiskName}

 # add extent to target/tpg
-cd /iscsi/${basename}:${iscsiName}/tpg1/luns
-create /backstores/block/${iscsiName}
+cd /iscsi/${basename}:${assetName}/tpg1/luns
+create /backstores/block/${assetName}
 `
             );
           },
@@ -271,12 +285,12 @@ create /backstores/block/${iscsiName}
         }

         // iqn = target
-        let iqn = basename + ":" + iscsiName;
+        let iqn = basename + ":" + assetName;
         this.ctx.logger.info("iqn: " + iqn);

         // store this off to make delete process more bullet proof
         await zb.zfs.set(datasetName, {
-          [ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName,
+          [ISCSI_ASSETS_NAME_PROPERTY_NAME]: assetName,
         });

         volume_context = {
|
@ -290,6 +304,231 @@ create /backstores/block/${iscsiName}
|
|||
lun: 0,
|
||||
};
|
||||
return volume_context;
|
||||
}
|
||||
|
||||
case "zfs-generic-nvmeof": {
|
||||
let basename;
|
||||
let assetName;
|
||||
|
||||
if (this.options.nvmeof.nameTemplate) {
|
||||
assetName = Handlebars.compile(this.options.nvmeof.nameTemplate)({
|
||||
name: call.request.name,
|
||||
parameters: call.request.parameters,
|
||||
});
|
||||
} else {
|
||||
assetName = zb.helpers.extractLeafName(datasetName);
|
||||
}
|
||||
|
||||
if (this.options.nvmeof.namePrefix) {
|
||||
assetName = this.options.nvmeof.namePrefix + assetName;
|
||||
}
|
||||
|
||||
if (this.options.nvmeof.nameSuffix) {
|
||||
assetName += this.options.nvmeof.nameSuffix;
|
||||
}
|
||||
|
||||
assetName = assetName.toLowerCase();
|
||||
|
||||
let extentDiskName = "zvol/" + datasetName;
|
||||
|
||||
/**
|
||||
* limit is a FreeBSD limitation
|
||||
* https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
|
||||
*/
|
||||
//if (extentDiskName.length > 63) {
|
||||
// throw new GrpcError(
|
||||
// grpc.status.FAILED_PRECONDITION,
|
||||
// `extent disk name cannot exceed 63 characters: ${extentDiskName}`
|
||||
// );
|
||||
//}
|
||||
|
||||
let namespace = 1;
|
||||
|
||||
switch (this.options.nvmeof.shareStrategy) {
|
||||
case "nvmetCli":
|
||||
{
|
||||
basename = this.options.nvmeof.shareStrategyNvmetCli.basename;
|
||||
let savefile = _.get(
|
||||
this.options,
|
||||
"nvmeof.shareStrategyNvmetCli.configPath",
|
||||
""
|
||||
);
|
||||
if (savefile) {
|
||||
savefile = `savefile=${savefile}`;
|
||||
}
|
||||
let setSubsystemAttributesText = "";
|
||||
if (this.options.nvmeof.shareStrategyNvmetCli.subsystem) {
|
||||
if (
|
||||
this.options.nvmeof.shareStrategyNvmetCli.subsystem.attributes
|
||||
) {
|
||||
for (const attributeName in this.options.nvmeof
|
||||
.shareStrategyNvmetCli.subsystem.attributes) {
|
||||
const attributeValue =
|
||||
this.options.nvmeof.shareStrategyNvmetCli.subsystem
|
||||
.attributes[attributeName];
|
||||
setSubsystemAttributesText += "\n";
|
||||
setSubsystemAttributesText += `set attr ${attributeName}=${attributeValue}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let portCommands = "";
|
||||
this.options.nvmeof.shareStrategyNvmetCli.ports.forEach(
|
||||
(port) => {
|
||||
portCommands += `
|
||||
cd /ports/${port}/subsystems
|
||||
create ${basename}:${assetName}
|
||||
`;
|
||||
}
|
||||
);
|
||||
|
||||
await GeneralUtils.retry(
|
||||
3,
|
||||
2000,
|
||||
async () => {
|
||||
await this.nvmetCliCommand(
|
||||
`
|
||||
# create subsystem
|
||||
cd /subsystems
|
||||
create ${basename}:${assetName}
|
||||
cd ${basename}:${assetName}
|
||||
${setSubsystemAttributesText}
|
||||
|
||||
# create subsystem namespace
|
||||
cd namespaces
|
||||
create ${namespace}
|
||||
cd ${namespace}
|
||||
set device path=/dev/${extentDiskName}
|
||||
enable
|
||||
|
||||
# associate subsystem/target to port(al)
|
||||
${portCommands}
|
||||
|
||||
saveconfig ${savefile}
|
||||
`
|
||||
);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.stdout && err.stdout.includes("Ran out of input")) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
}
|
||||
break;
|
||||
|
||||
case "spdkCli":
|
||||
{
|
||||
basename = this.options.nvmeof.shareStrategySpdkCli.basename;
|
||||
let bdevAttributesText = "";
|
||||
if (this.options.nvmeof.shareStrategySpdkCli.bdev) {
|
||||
if (this.options.nvmeof.shareStrategySpdkCli.bdev.attributes) {
|
||||
for (const attributeName in this.options.nvmeof
|
||||
.shareStrategySpdkCli.bdev.attributes) {
|
||||
const attributeValue =
|
||||
this.options.nvmeof.shareStrategySpdkCli.bdev.attributes[
|
||||
attributeName
|
||||
];
|
||||
bdevAttributesText += `${attributeName}=${attributeValue}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let subsystemAttributesText = "";
|
||||
if (this.options.nvmeof.shareStrategySpdkCli.subsystem) {
|
||||
if (
|
||||
this.options.nvmeof.shareStrategySpdkCli.subsystem.attributes
|
||||
) {
|
||||
for (const attributeName in this.options.nvmeof
|
||||
.shareStrategySpdkCli.subsystem.attributes) {
|
||||
const attributeValue =
|
||||
this.options.nvmeof.shareStrategySpdkCli.subsystem
|
||||
.attributes[attributeName];
|
||||
subsystemAttributesText += `${attributeName}=${attributeValue}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let listenerCommands = `cd /nvmf/subsystem/${basename}:${assetName}/listen_addresses\n`;
|
||||
this.options.nvmeof.shareStrategySpdkCli.listeners.forEach(
|
||||
(listener) => {
|
||||
let listenerAttributesText = "";
|
||||
for (const attributeName in listener) {
|
||||
const attributeValue = listener[attributeName];
|
||||
listenerAttributesText += ` ${attributeName}=${attributeValue} `;
|
||||
}
|
||||
listenerCommands += `
|
||||
create ${listenerAttributesText}
|
||||
`;
|
||||
}
|
||||
);
|
||||
|
||||
await GeneralUtils.retry(
|
||||
3,
|
||||
2000,
|
||||
async () => {
|
||||
await this.spdkCliCommand(
|
||||
`
|
||||
# create bdev
|
||||
cd /bdevs/${this.options.nvmeof.shareStrategySpdkCli.bdev.type}
|
||||
create filename=/dev/${extentDiskName} name=${basename}:${assetName} ${bdevAttributesText}
|
||||
|
||||
# create subsystem
|
||||
cd /nvmf/subsystem
|
||||
create nqn=${basename}:${assetName} ${subsystemAttributesText}
|
||||
cd ${basename}:${assetName}
|
||||
|
||||
# create namespace
|
||||
cd /nvmf/subsystem/${basename}:${assetName}/namespaces
|
||||
create bdev_name=${basename}:${assetName} nsid=${namespace}
|
||||
|
||||
# add listener
|
||||
${listenerCommands}
|
||||
|
||||
cd /
|
||||
save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
|
||||
`
|
||||
);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.stdout && err.stdout.includes("Ran out of input")) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
// iqn = target
|
||||
let nqn = basename + ":" + assetName;
|
||||
this.ctx.logger.info("nqn: " + nqn);
|
||||
|
||||
// store this off to make delete process more bullet proof
|
||||
await zb.zfs.set(datasetName, {
|
||||
[NVMEOF_ASSETS_NAME_PROPERTY_NAME]: assetName,
|
||||
});
|
||||
|
||||
volume_context = {
|
||||
node_attach_driver: "nvmeof",
|
||||
transport: this.options.nvmeof.transport || "",
|
||||
transports: this.options.nvmeof.transports
|
||||
? this.options.nvmeof.transports.join(",")
|
||||
: "",
|
||||
nqn,
|
||||
nsid: namespace,
|
||||
};
|
||||
return volume_context;
|
||||
}
|
||||
|
||||
default:
|
||||
throw new GrpcError(
|
||||
|
|
@@ -367,9 +606,9 @@ create /backstores/block/${iscsiName}
         }
         break;

-      case "zfs-generic-iscsi":
+      case "zfs-generic-iscsi": {
         let basename;
-        let iscsiName;
+        let assetName;

         // Delete iscsi assets
         try {
|
@ -386,23 +625,23 @@ create /backstores/block/${iscsiName}
|
|||
properties = properties[datasetName];
|
||||
this.ctx.logger.debug("zfs props data: %j", properties);
|
||||
|
||||
iscsiName = properties[ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
|
||||
assetName = properties[ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
|
||||
|
||||
if (zb.helpers.isPropertyValueSet(iscsiName)) {
|
||||
if (zb.helpers.isPropertyValueSet(assetName)) {
|
||||
//do nothing
|
||||
} else {
|
||||
iscsiName = zb.helpers.extractLeafName(datasetName);
|
||||
assetName = zb.helpers.extractLeafName(datasetName);
|
||||
|
||||
if (this.options.iscsi.namePrefix) {
|
||||
iscsiName = this.options.iscsi.namePrefix + iscsiName;
|
||||
assetName = this.options.iscsi.namePrefix + assetName;
|
||||
}
|
||||
|
||||
if (this.options.iscsi.nameSuffix) {
|
||||
iscsiName += this.options.iscsi.nameSuffix;
|
||||
assetName += this.options.iscsi.nameSuffix;
|
||||
}
|
||||
}
|
||||
|
||||
iscsiName = iscsiName.toLowerCase();
|
||||
assetName = assetName.toLowerCase();
|
||||
switch (this.options.iscsi.shareStrategy) {
|
||||
case "targetCli":
|
||||
basename = this.options.iscsi.shareStrategyTargetCli.basename;
|
||||
|
|
@@ -414,11 +653,11 @@ create /backstores/block/${iscsiName}
                 `
 # delete target
 cd /iscsi
-delete ${basename}:${iscsiName}
+delete ${basename}:${assetName}

 # delete extent
 cd /backstores/block
-delete ${iscsiName}
+delete ${assetName}
 `
               );
             },
@@ -437,6 +676,132 @@ delete ${iscsiName}
             break;
         }
         break;
+      }
+
+      case "zfs-generic-nvmeof": {
+        let basename;
+        let assetName;
+
+        // Delete nvmeof assets
+        try {
+          properties = await zb.zfs.get(datasetName, [
+            NVMEOF_ASSETS_NAME_PROPERTY_NAME,
+          ]);
+        } catch (err) {
+          if (err.toString().includes("dataset does not exist")) {
+            return;
+          }
+          throw err;
+        }
+
+        properties = properties[datasetName];
+        this.ctx.logger.debug("zfs props data: %j", properties);
+
+        assetName = properties[NVMEOF_ASSETS_NAME_PROPERTY_NAME].value;
+
+        if (zb.helpers.isPropertyValueSet(assetName)) {
+          //do nothing
+        } else {
+          assetName = zb.helpers.extractLeafName(datasetName);
+
+          if (this.options.nvmeof.namePrefix) {
+            assetName = this.options.nvmeof.namePrefix + assetName;
+          }
+
+          if (this.options.nvmeof.nameSuffix) {
+            assetName += this.options.nvmeof.nameSuffix;
+          }
+        }
+
+        assetName = assetName.toLowerCase();
+        switch (this.options.nvmeof.shareStrategy) {
+          case "nvmetCli":
+            {
+              basename = this.options.nvmeof.shareStrategyNvmetCli.basename;
+              let savefile = _.get(
+                this.options,
+                "nvmeof.shareStrategyNvmetCli.configPath",
+                ""
+              );
+              if (savefile) {
+                savefile = `savefile=${savefile}`;
+              }
+              let portCommands = "";
+              this.options.nvmeof.shareStrategyNvmetCli.ports.forEach(
+                (port) => {
+                  portCommands += `
+cd /ports/${port}/subsystems
+delete ${basename}:${assetName}
+`;
+                }
+              );
+              await GeneralUtils.retry(
+                3,
+                2000,
+                async () => {
+                  await this.nvmetCliCommand(
+                    `
+# delete subsystem from port
+${portCommands}
+
+# delete subsystem
+cd /subsystems
+delete ${basename}:${assetName}
+
+saveconfig ${savefile}
+`
+                  );
+                },
+                {
+                  retryCondition: (err) => {
+                    if (err.stdout && err.stdout.includes("Ran out of input")) {
+                      return true;
+                    }
+                    return false;
+                  },
+                }
+              );
+            }
+            break;
+          case "spdkCli":
+            {
+              basename = this.options.nvmeof.shareStrategySpdkCli.basename;
+              await GeneralUtils.retry(
+                3,
+                2000,
+                async () => {
+                  await this.spdkCliCommand(
+                    `
+# delete subsystem
+cd /nvmf/subsystem/
+delete subsystem_nqn=${basename}:${assetName}
+
+# delete bdev
+cd /bdevs/${this.options.nvmeof.shareStrategySpdkCli.bdev.type}
+delete name=${basename}:${assetName}
+
+cd /
+save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
+`
+                  );
+                },
+                {
+                  retryCondition: (err) => {
+                    if (err.stdout && err.stdout.includes("Ran out of input")) {
+                      return true;
+                    }
+                    return false;
+                  },
+                }
+              );
+            }
+            break;
+
+          default:
+            break;
+        }
+        break;
+      }

       default:
         throw new GrpcError(
@ -477,18 +842,18 @@ delete ${iscsiName}
|
|||
let command = "sh";
|
||||
let args = ["-c"];
|
||||
|
||||
let targetCliArgs = ["targetcli"];
|
||||
let cliArgs = ["targetcli"];
|
||||
if (
|
||||
_.get(this.options, "iscsi.shareStrategyTargetCli.sudoEnabled", false)
|
||||
) {
|
||||
targetCliArgs.unshift("sudo");
|
||||
cliArgs.unshift("sudo");
|
||||
}
|
||||
|
||||
let targetCliCommand = [];
|
||||
targetCliCommand.push(`echo "${data}"`.trim());
|
||||
targetCliCommand.push("|");
|
||||
targetCliCommand.push(targetCliArgs.join(" "));
|
||||
args.push("'" + targetCliCommand.join(" ") + "'");
|
||||
let cliCommand = [];
|
||||
cliCommand.push(`echo "${data}"`.trim());
|
||||
cliCommand.push("|");
|
||||
cliCommand.push(cliArgs.join(" "));
|
||||
args.push("'" + cliCommand.join(" ") + "'");
|
||||
|
||||
let logCommandTmp = command + " " + args.join(" ");
|
||||
let logCommand = "";
|
||||
|
|
@ -527,6 +892,151 @@ delete ${iscsiName}
|
|||
}
|
||||
return response;
|
||||
}
|
||||
|
||||
async nvmetCliCommand(data) {
|
||||
const execClient = this.getExecClient();
|
||||
const driver = this;
|
||||
|
||||
if (
|
||||
_.get(
|
||||
this.options,
|
||||
"nvmeof.shareStrategyNvmetCli.configIsImportedFilePath"
|
||||
)
|
||||
) {
|
||||
try {
|
||||
let response = await execClient.exec(
|
||||
execClient.buildCommand("test", [
|
||||
"-f",
|
||||
_.get(
|
||||
this.options,
|
||||
"nvmeof.shareStrategyNvmetCli.configIsImportedFilePath"
|
||||
),
|
||||
])
|
||||
);
|
||||
} catch (err) {
|
||||
throw new Error("nvmet has not been fully configured");
|
||||
}
|
||||
}
|
||||
|
||||
data = data.trim();
|
||||
|
||||
let command = "sh";
|
||||
let args = ["-c"];
|
||||
|
||||
let cliArgs = [
|
||||
_.get(
|
||||
this.options,
|
||||
"nvmeof.shareStrategyNvmetCli.nvmetcliPath",
|
||||
"nvmetcli"
|
||||
),
|
||||
];
|
||||
if (
|
||||
_.get(this.options, "nvmeof.shareStrategyNvmetCli.sudoEnabled", false)
|
||||
) {
|
||||
cliArgs.unshift("sudo");
|
||||
}
|
||||
|
||||
let cliCommand = [];
|
||||
cliCommand.push(`echo "${data}"`.trim());
|
||||
cliCommand.push("|");
|
||||
cliCommand.push(cliArgs.join(" "));
|
||||
args.push("'" + cliCommand.join(" ") + "'");
|
||||
|
||||
let logCommandTmp = command + " " + args.join(" ");
|
||||
let logCommand = "";
|
||||
|
||||
logCommandTmp.split("\n").forEach((line) => {
|
||||
if (line.startsWith("set auth password=")) {
|
||||
logCommand += "set auth password=<redacted>";
|
||||
} else if (line.startsWith("set auth mutual_password=")) {
|
||||
logCommand += "set auth mutual_password=<redacted>";
|
||||
} else {
|
||||
logCommand += line;
|
||||
}
|
||||
|
||||
logCommand += "\n";
|
||||
});
|
||||
|
||||
driver.ctx.logger.verbose("nvmetCLI command: " + logCommand);
|
||||
//process.exit(0);
|
||||
|
||||
// https://github.com/democratic-csi/democratic-csi/issues/127
|
||||
// https://bugs.launchpad.net/ubuntu/+source/python-configshell-fb/+bug/1776761
|
||||
// can apply the linked patch with some modifications to overcome the
|
||||
// KeyErrors or we can simply start a fake tty which does not seem to have
|
||||
// a detrimental effect, only affects Ubuntu 18.04 and older
|
||||
let options = {
|
||||
pty: true,
|
||||
};
|
||||
let response = await execClient.exec(
|
||||
execClient.buildCommand(command, args),
|
||||
options
|
||||
);
|
||||
driver.ctx.logger.verbose("nvmetCLI response: " + JSON.stringify(response));
|
||||
if (response.code != 0) {
|
||||
throw response;
|
||||
}
|
||||
return response;
|
||||
}
|
||||
|
||||
async spdkCliCommand(data) {
|
||||
const execClient = this.getExecClient();
|
||||
const driver = this;
|
||||
|
||||
data = data.trim();
|
||||
|
||||
let command = "sh";
|
||||
let args = ["-c"];
|
||||
|
||||
let cliArgs = [
|
||||
_.get(this.options, "nvmeof.shareStrategySpdkCli.spdkcliPath", "spdkcli"),
|
||||
];
|
||||
if (_.get(this.options, "nvmeof.shareStrategySpdkCli.sudoEnabled", false)) {
|
||||
cliArgs.unshift("sudo");
|
||||
}
|
||||
|
||||
let cliCommand = [];
|
||||
cliCommand.push(`echo "${data}"`.trim());
|
||||
cliCommand.push("|");
|
||||
cliCommand.push(cliArgs.join(" "));
|
||||
args.push("'" + cliCommand.join(" ") + "'");
|
||||
|
||||
let logCommandTmp = command + " " + args.join(" ");
|
||||
let logCommand = "";
|
||||
|
||||
logCommandTmp.split("\n").forEach((line) => {
|
||||
if (line.startsWith("set auth password=")) {
|
||||
logCommand += "set auth password=<redacted>";
|
||||
} else if (line.startsWith("set auth mutual_password=")) {
|
||||
logCommand += "set auth mutual_password=<redacted>";
|
||||
} else {
|
||||
logCommand += line;
|
||||
}
|
||||
|
||||
logCommand += "\n";
|
||||
});
|
||||
|
||||
driver.ctx.logger.verbose("spdkCLI command: " + logCommand);
|
||||
//process.exit(0);
|
||||
|
||||
// https://github.com/democratic-csi/democratic-csi/issues/127
|
||||
// https://bugs.launchpad.net/ubuntu/+source/python-configshell-fb/+bug/1776761
|
||||
// can apply the linked patch with some modifications to overcome the
|
||||
// KeyErrors or we can simply start a fake tty which does not seem to have
|
||||
// a detrimental effect, only affects Ubuntu 18.04 and older
|
||||
let options = {
|
||||
pty: true,
|
||||
};
|
||||
let response = await execClient.exec(
|
||||
execClient.buildCommand(command, args),
|
||||
options
|
||||
);
|
||||
driver.ctx.logger.verbose("spdkCLI response: " + JSON.stringify(response));
|
||||
if (response.code != 0) {
|
||||
throw response;
|
||||
}
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.ControllerZfsGenericDriver = ControllerZfsGenericDriver;
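
// Illustrative sketch (not part of this commit): an options shape for the
// zfs-generic-nvmeof driver matching the lookups above; host names, paths,
// the nqn basename and the marker file are hypothetical placeholders.
const exampleNvmeofOptions = {
  driver: "zfs-generic-nvmeof",
  nvmeof: {
    namePrefix: "csi-",
    nameSuffix: "-cluster1",
    shareStrategy: "nvmetCli",
    shareStrategyNvmetCli: {
      basename: "nqn.2000-01.com.example:csi", // hypothetical nqn basename
      nvmetcliPath: "nvmetcli",
      sudoEnabled: true,
      configPath: "/etc/nvmet/config.json",
      // hypothetical marker file proving the nvmet config was imported at boot
      configIsImportedFilePath: "/var/run/nvmet-config-imported",
      ports: ["1"],
    },
  },
};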

@@ -2,7 +2,8 @@ const _ = require("lodash");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const LocalCliExecClient = require("./exec").LocalCliClient;
const LocalCliExecClient =
  require("../../utils/zfs_local_exec_client").LocalCliClient;
const registry = require("../../utils/registry");
const { Zetabyte } = require("../../utils/zfs");

@@ -109,7 +110,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
   *
   * @returns Array
   */
  getAccessModes() {
  getAccessModes(capability) {
    let access_modes = _.get(this.options, "csi.access_modes", null);
    if (access_modes !== null) {
      return access_modes;

@@ -118,7 +119,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
    const driverZfsResourceType = this.getDriverZfsResourceType();
    switch (driverZfsResourceType) {
      case "filesystem":
        return [
        access_modes = [
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0

@@ -128,8 +129,9 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
          "MULTI_NODE_SINGLE_WRITER",
          "MULTI_NODE_MULTI_WRITER",
        ];
        break;
      case "volume":
        return [
        access_modes = [
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0

@@ -139,7 +141,17 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
          "MULTI_NODE_SINGLE_WRITER",
          "MULTI_NODE_MULTI_WRITER",
        ];
        break;
    }

    if (
      capability.access_type == "block" &&
      !access_modes.includes("MULTI_NODE_MULTI_WRITER")
    ) {
      access_modes.push("MULTI_NODE_MULTI_WRITER");
    }

    return access_modes;
  }

  /**

@@ -39,7 +39,7 @@ const MAX_ZVOL_NAME_LENGTH_CACHE_KEY = "controller-zfs:max_zvol_name_length";
 * - async setZetabyteCustomOptions(options) // optional
 * - getDriverZfsResourceType() // return "filesystem" or "volume"
 * - getFSTypes() // optional
 * - getAccessModes() // optional
 * - getAccessModes(capability) // optional
 * - async getAccessibleTopology() // optional
 * - async createShare(call, datasetName) // return appropriate volume_context for Node operations
 * - async deleteShare(call, datasetName) // no return expected

@@ -207,7 +207,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
    }
  }

  getAccessModes() {
  getAccessModes(capability) {
    let access_modes = _.get(this.options, "csi.access_modes", null);
    if (access_modes !== null) {
      return access_modes;

@@ -216,7 +216,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
    const driverZfsResourceType = this.getDriverZfsResourceType();
    switch (driverZfsResourceType) {
      case "filesystem":
        return [
        access_modes = [
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0

@@ -226,8 +226,9 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
          "MULTI_NODE_SINGLE_WRITER",
          "MULTI_NODE_MULTI_WRITER",
        ];
        break;
      case "volume":
        return [
        access_modes = [
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0

@@ -236,7 +237,17 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
          "MULTI_NODE_READER_ONLY",
          "MULTI_NODE_SINGLE_WRITER",
        ];
        break;
    }

    if (
      capability.access_type == "block" &&
      !access_modes.includes("MULTI_NODE_MULTI_WRITER")
    ) {
      access_modes.push("MULTI_NODE_MULTI_WRITER");
    }

    return access_modes;
  }
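
// Sketch of the new capability-aware behavior (illustrative, `driver` being
// any ControllerZfsBaseDriver subclass instance): a block-mode capability now
// yields MULTI_NODE_MULTI_WRITER even where mount-mode capabilities do not.
// const blockModes = driver.getAccessModes({ access_type: "block" });
// // blockModes includes "MULTI_NODE_MULTI_WRITER"
// const mountModes = driver.getAccessModes({ access_type: "mount" });
// // for "volume" resources mountModes does not include it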

  assertCapabilities(capabilities) {

@@ -261,7 +272,11 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
        return false;
      }

      if (!this.getAccessModes().includes(capability.access_mode.mode)) {
      if (
        !this.getAccessModes(capability).includes(
          capability.access_mode.mode
        )
      ) {
        message = `invalid access_mode, ${capability.access_mode.mode}`;
        return false;
      }

@@ -278,7 +293,11 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
        }
      }

      if (!this.getAccessModes().includes(capability.access_mode.mode)) {
      if (
        !this.getAccessModes(capability).includes(
          capability.access_mode.mode
        )
      ) {
        message = `invalid access_mode, ${capability.access_mode.mode}`;
        return false;
      }

@@ -35,6 +35,7 @@ function factory(ctx, options) {
    case "zfs-generic-nfs":
    case "zfs-generic-smb":
    case "zfs-generic-iscsi":
    case "zfs-generic-nvmeof":
      return new ControllerZfsGenericDriver(ctx, options);
    case "zfs-local-dataset":
    case "zfs-local-zvol":

@@ -2017,6 +2017,49 @@ class FreeNASApiDriver extends CsiBaseDriver {
    });
  }

  getAccessModes(capability) {
    let access_modes = _.get(this.options, "csi.access_modes", null);
    if (access_modes !== null) {
      return access_modes;
    }

    const driverZfsResourceType = this.getDriverZfsResourceType();
    switch (driverZfsResourceType) {
      case "filesystem":
        access_modes = [
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
          "SINGLE_NODE_READER_ONLY",
          "MULTI_NODE_READER_ONLY",
          "MULTI_NODE_SINGLE_WRITER",
          "MULTI_NODE_MULTI_WRITER",
        ];
        break;
      case "volume":
        access_modes = [
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
          "SINGLE_NODE_READER_ONLY",
          "MULTI_NODE_READER_ONLY",
          "MULTI_NODE_SINGLE_WRITER",
        ];
        break;
    }

    if (
      capability.access_type == "block" &&
      !access_modes.includes("MULTI_NODE_MULTI_WRITER")
    ) {
      access_modes.push("MULTI_NODE_MULTI_WRITER");
    }

    return access_modes;
  }

  assertCapabilities(capabilities) {
    const driverZfsResourceType = this.getDriverZfsResourceType();
    this.ctx.logger.verbose("validating capabilities: %j", capabilities);

@@ -2040,16 +2083,9 @@ class FreeNASApiDriver extends CsiBaseDriver {
      }

      if (
        ![
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
          "SINGLE_NODE_READER_ONLY",
          "MULTI_NODE_READER_ONLY",
          "MULTI_NODE_SINGLE_WRITER",
          "MULTI_NODE_MULTI_WRITER",
        ].includes(capability.access_mode.mode)
        !this.getAccessModes(capability).includes(
          capability.access_mode.mode
        )
      ) {
        message = `invalid access_mode, ${capability.access_mode.mode}`;
        return false;

@@ -2070,15 +2106,9 @@ class FreeNASApiDriver extends CsiBaseDriver {
      }

      if (
        ![
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
          "SINGLE_NODE_READER_ONLY",
          "MULTI_NODE_READER_ONLY",
          "MULTI_NODE_SINGLE_WRITER",
        ].includes(capability.access_mode.mode)
        !this.getAccessModes(capability).includes(
          capability.access_mode.mode
        )
      ) {
        message = `invalid access_mode, ${capability.access_mode.mode}`;
        return false;

@@ -2,7 +2,7 @@ const _ = require("lodash");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const registry = require("../../utils/registry");
const SshClient = require("../../utils/ssh").SshClient;
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
const HttpClient = require("./http").Client;
const TrueNASApiClient = require("./http/api").Api;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");

@@ -9,10 +9,12 @@ const { Mount } = require("../utils/mount");
const { OneClient } = require("../utils/oneclient");
const { Filesystem } = require("../utils/filesystem");
const { ISCSI } = require("../utils/iscsi");
const { NVMEoF } = require("../utils/nvmeof");
const registry = require("../utils/registry");
const semver = require("semver");
const GeneralUtils = require("../utils/general");
const { Zetabyte } = require("../utils/zfs");
const { transport } = require("winston");

const __REGISTRY_NS__ = "CsiBaseDriver";

@@ -139,6 +141,18 @@ class CsiBaseDriver {
    });
  }

  /**
   * Get an instance of the NVMEoF class
   *
   * @returns NVMEoF
   */
  getDefaultNVMEoFInstance() {
    const driver = this;
    return registry.get(`${__REGISTRY_NS__}:default_nvmeof_instance`, () => {
      return new NVMEoF({ logger: driver.ctx.logger });
    });
  }
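
// Sketch of the registry-backed singleton used above: the factory callback
// only runs on the first lookup, later calls return the cached instance.
// const nvmeofA = driver.getDefaultNVMEoFInstance();
// const nvmeofB = driver.getDefaultNVMEoFInstance();
// // nvmeofA === nvmeofB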

  getDefaultZetabyteInstance() {
    return registry.get(`${__REGISTRY_NS__}:default_zb_instance`, () => {
      return new Zetabyte({

@@ -560,6 +574,7 @@ class CsiBaseDriver {
      const mount = driver.getDefaultMountInstance();
      const filesystem = driver.getDefaultFilesystemInstance();
      const iscsi = driver.getDefaultISCSIInstance();
      const nvmeof = driver.getDefaultNVMEoFInstance();
      let result;
      let device;
      let block_device_info;

@@ -792,7 +807,11 @@ class CsiBaseDriver {
            await iscsi.iscsiadm.rescanSession(session);

            // find device name
            device = iscsi.devicePathByPortalIQNLUN(iscsiConnection.portal, iscsiConnection.iqn, iscsiConnection.lun)
            device = iscsi.devicePathByPortalIQNLUN(
              iscsiConnection.portal,
              iscsiConnection.iqn,
              iscsiConnection.lun
            );
            let deviceByPath = device;

            // can take some time for device to show up, loop for some period

@@ -887,6 +906,242 @@ class CsiBaseDriver {
          }

          break;

        case "nvmeof":
          {
            let transports = [];
            if (volume_context.transport) {
              transports.push(volume_context.transport.trim());
            }

            if (volume_context.transports) {
              volume_context.transports.split(",").forEach((transport) => {
                transports.push(transport.trim());
              });
            }

            // ensure unique entries only
            transports = [...new Set(transports)];

            // stores actual device paths after nvmeof login
            let nvmeofControllerDevices = [];
            let nvmeofNamespaceDevices = [];

            // stores configuration of targets/iqn/luns to connect to
            let nvmeofConnections = [];
            for (let transport of transports) {
              nvmeofConnections.push({
                transport,
                nqn: volume_context.nqn,
                nsid: volume_context.nsid,
              });
            }

            for (let nvmeofConnection of nvmeofConnections) {
              // connect
              try {
                await GeneralUtils.retry(15, 2000, async () => {
                  await nvmeof.connectByNQNTransport(
                    nvmeofConnection.nqn,
                    nvmeofConnection.transport
                  );
                });
              } catch (err) {
                driver.ctx.logger.warn(
                  `error: ${JSON.stringify(err)} connecting to transport: ${
                    nvmeofConnection.transport
                  }`
                );
                continue;
              }

              // find controller device
              let controllerDevice;
              try {
                await GeneralUtils.retry(15, 2000, async () => {
                  controllerDevice =
                    await nvmeof.controllerDevicePathByTransportNQN(
                      nvmeofConnection.transport,
                      nvmeofConnection.nqn,
                      nvmeofConnection.nsid
                    );

                  if (!controllerDevice) {
                    throw new Error(`failed to find controller device`);
                  }
                });
              } catch (err) {
                driver.ctx.logger.warn(
                  `error finding nvme controller device: ${JSON.stringify(
                    err
                  )}`
                );
                continue;
              }

              // find namespace device
              let namespaceDevice;
              try {
                await GeneralUtils.retry(15, 2000, async () => {
                  // rescan in scenarios when login previously occurred but volumes never appeared
                  // must be the NVMe char device, not the namespace device
                  await nvmeof.rescanNamespace(controllerDevice);

                  namespaceDevice =
                    await nvmeof.namespaceDevicePathByTransportNQNNamespace(
                      nvmeofConnection.transport,
                      nvmeofConnection.nqn,
                      nvmeofConnection.nsid
                    );
                  if (!namespaceDevice) {
                    throw new Error(`failed to find namespace device`);
                  }
                });
              } catch (err) {
                driver.ctx.logger.warn(
                  `error finding nvme namespace device: ${JSON.stringify(
                    err
                  )}`
                );
                continue;
              }

              // sanity check for device files
              if (!namespaceDevice) {
                continue;
              }

              // sanity check for device files
              if (!controllerDevice) {
                continue;
              }

              // can take some time for device to show up, loop for some period
              result = await filesystem.pathExists(namespaceDevice);
              let timer_start = Math.round(new Date().getTime() / 1000);
              let timer_max = 30;
              let deviceCreated = result;
              while (!result) {
                await GeneralUtils.sleep(2000);
                result = await filesystem.pathExists(namespaceDevice);

                if (result) {
                  deviceCreated = true;
                  break;
                }

                let current_time = Math.round(new Date().getTime() / 1000);
                if (!result && current_time - timer_start > timer_max) {
                  driver.ctx.logger.warn(
                    `hit timeout waiting for namespace device node to appear: ${namespaceDevice}`
                  );
                  break;
                }
              }

              if (deviceCreated) {
                device = await filesystem.realpath(namespaceDevice);
                nvmeofControllerDevices.push(controllerDevice);
                nvmeofNamespaceDevices.push(namespaceDevice);

                driver.ctx.logger.info(
                  `successfully logged into nvmeof transport ${nvmeofConnection.transport} and created controller device: ${controllerDevice}, namespace device: ${namespaceDevice}`
                );
              }
            }

            // let things settle
            // this will help in dm scenarios
            await GeneralUtils.sleep(2000);

            // filter duplicates
            nvmeofNamespaceDevices = nvmeofNamespaceDevices.filter(
              (value, index, self) => {
                return self.indexOf(value) === index;
              }
            );

            nvmeofControllerDevices = nvmeofControllerDevices.filter(
              (value, index, self) => {
                return self.indexOf(value) === index;
              }
            );

            // only throw an error if we were not able to attach to *any* devices
            if (nvmeofNamespaceDevices.length < 1) {
              throw new GrpcError(
                grpc.status.UNKNOWN,
                `unable to attach any nvme devices`
              );
            }

            if (nvmeofControllerDevices.length != nvmeofConnections.length) {
              driver.ctx.logger.warn(
                `failed to attach all nvmeof devices/subsystems/transports`
              );

              // TODO: allow a parameter to control this behavior in some form
              if (false) {
                throw new GrpcError(
                  grpc.status.UNKNOWN,
                  `unable to attach all nvmeof devices`
                );
              }
            }

            /**
             * NVMEoF has native multipath capabilities without using device mapper
             * You can disable the built-in using kernel param nvme_core.multipath=N/Y
             */
            let useNativeMultipath = await nvmeof.nativeMultipathEnabled();

            if (useNativeMultipath) {
              // native multipath should collapse all paths into a single namespace device
              if (nvmeofNamespaceDevices.length > 1) {
                throw new GrpcError(
                  grpc.status.UNKNOWN,
                  `too many nvme namespace devices, native multipath enabled therefore should only have 1`
                );
              }
            } else {
              // compare all device-mapper slaves with the newly created devices
              // if any of the new devices are device-mapper slaves treat this as a
              // multipath scenario
              let allDeviceMapperSlaves =
                await filesystem.getAllDeviceMapperSlaveDevices();
              let commonDevices = allDeviceMapperSlaves.filter((value) =>
                nvmeofNamespaceDevices.includes(value)
              );

              const useDMMultipath =
                nvmeofConnections.length > 1 || commonDevices.length > 0;

              // discover multipath device to use
              if (useDMMultipath) {
                device = await filesystem.getDeviceMapperDeviceFromSlaves(
                  nvmeofNamespaceDevices,
                  false
                );

                if (!device) {
                  throw new GrpcError(
                    grpc.status.UNKNOWN,
                    `failed to discover multipath device`
                  );
                }
              } else {
                // without multipath there should be exactly one namespace device
                if (nvmeofNamespaceDevices.length > 1) {
                  throw new GrpcError(
                    grpc.status.UNKNOWN,
                    `too many nvme namespace devices, neither DM nor native multipath enabled`
                  );
                }
              }
            }
          }
          break;
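
          // Illustrative volume_context consumed by the "nvmeof" branch above
          // (all values hypothetical):
          // {
          //   node_attach_driver: "nvmeof",
          //   transport: "tcp://server.example.com:4420",
          //   nqn: "nqn.2000-01.com.example:csi:pvc-123",
          //   nsid: "1",
          // }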

        case "hostpath":
          result = await mount.pathIsMounted(staging_target_path);
          // if not mounted, mount

@@ -989,6 +1244,7 @@ class CsiBaseDriver {
      let is_block = false;
      switch (node_attach_driver) {
        case "iscsi":
        case "nvmeof":
          is_block = true;
          break;
        case "zfs-local":

@@ -1053,6 +1309,16 @@ class CsiBaseDriver {
          if (!Array.isArray(formatOptions)) {
            formatOptions = [];
          }

          switch (fs_type) {
            case "ext3":
            case "ext4":
            case "ext4dev":
              // disable reserved blocks in this scenario
              formatOptions.unshift("-m", "0");
              break;
          }

          await filesystem.formatDevice(device, fs_type, formatOptions);
        }
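
        // With the defaults above an ext4 device ends up formatted roughly as
        // (sketch): mkfs.ext4 -m 0 <extra formatOptions> /dev/<device>
        // "-m 0" drops the 5% reserved-blocks default, which only wastes
        // space on dedicated CSI volumes.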

@@ -1093,6 +1359,7 @@ class CsiBaseDriver {
          fs_type = "cifs";
          break;
        case "iscsi":
        case "nvmeof":
          fs_type = "ext4";
          break;
        default:

@@ -1988,6 +2255,7 @@ class CsiBaseDriver {
      const mount = driver.getDefaultMountInstance();
      const filesystem = driver.getDefaultFilesystemInstance();
      const iscsi = driver.getDefaultISCSIInstance();
      const nvmeof = driver.getDefaultNVMEoFInstance();
      let result;
      let is_block = false;
      let is_device_mapper = false;

@@ -2101,6 +2369,7 @@ class CsiBaseDriver {
      }

      if (is_block) {
        let breakdeviceloop = false;
        let realBlockDeviceInfos = [];
        // detect if is a multipath device
        is_device_mapper = await filesystem.isDeviceMapperDevice(

@@ -2122,94 +2391,127 @@ class CsiBaseDriver {

        // TODO: this could be made async to detach all simultaneously
        for (const block_device_info_i of realBlockDeviceInfos) {
          if (await filesystem.deviceIsIscsi(block_device_info_i.path)) {
            let parent_block_device = await filesystem.getBlockDeviceParent(
              block_device_info_i.path
            );
          if (breakdeviceloop) {
            break;
          }
          switch (block_device_info_i.tran) {
            case "iscsi":
              {
                if (
                  await filesystem.deviceIsIscsi(block_device_info_i.path)
                ) {
                  let parent_block_device =
                    await filesystem.getBlockDeviceParent(
                      block_device_info_i.path
                    );

            // figure out which iscsi session this belongs to and logout
            // scan /dev/disk/by-path/ip-*?
            // device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`;
            // parse output from `iscsiadm -m session -P 3`
            let sessions = await iscsi.iscsiadm.getSessionsDetails();
            for (let i = 0; i < sessions.length; i++) {
              let session = sessions[i];
              let is_attached_to_session = false;
                  // figure out which iscsi session this belongs to and logout
                  // scan /dev/disk/by-path/ip-*?
                  // device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`;
                  // parse output from `iscsiadm -m session -P 3`
                  let sessions = await iscsi.iscsiadm.getSessionsDetails();
                  for (let i = 0; i < sessions.length; i++) {
                    let session = sessions[i];
                    let is_attached_to_session = false;

              if (
                session.attached_scsi_devices &&
                session.attached_scsi_devices.host &&
                session.attached_scsi_devices.host.devices
              ) {
                is_attached_to_session =
                  session.attached_scsi_devices.host.devices.some(
                    (device) => {
                      if (
                        device.attached_scsi_disk == parent_block_device.name
                      ) {
                        return true;
                    if (
                      session.attached_scsi_devices &&
                      session.attached_scsi_devices.host &&
                      session.attached_scsi_devices.host.devices
                    ) {
                      is_attached_to_session =
                        session.attached_scsi_devices.host.devices.some(
                          (device) => {
                            if (
                              device.attached_scsi_disk ==
                                parent_block_device.name
                            ) {
                              return true;
                            }
                            return false;
                          }
                        );
                    }

                    if (is_attached_to_session) {
                      let timer_start;
                      let timer_max;

                      timer_start = Math.round(new Date().getTime() / 1000);
                      timer_max = 30;
                      let loggedOut = false;
                      while (!loggedOut) {
                        try {
                          await iscsi.iscsiadm.logout(session.target, [
                            session.persistent_portal,
                          ]);
                          loggedOut = true;
                        } catch (err) {
                          await GeneralUtils.sleep(2000);
                          let current_time = Math.round(
                            new Date().getTime() / 1000
                          );
                          if (current_time - timer_start > timer_max) {
                            // not throwing error for now as future invocations would not enter code path anyhow
                            loggedOut = true;
                            //throw new GrpcError(
                            //  grpc.status.UNKNOWN,
                            //  `hit timeout trying to logout of iscsi target: ${session.persistent_portal}`
                            //);
                          }
                        }
                      }

                      timer_start = Math.round(new Date().getTime() / 1000);
                      timer_max = 30;
                      let deletedEntry = false;
                      while (!deletedEntry) {
                        try {
                          await iscsi.iscsiadm.deleteNodeDBEntry(
                            session.target,
                            session.persistent_portal
                          );
                          deletedEntry = true;
                        } catch (err) {
                          await GeneralUtils.sleep(2000);
                          let current_time = Math.round(
                            new Date().getTime() / 1000
                          );
                          if (current_time - timer_start > timer_max) {
                            // not throwing error for now as future invocations would not enter code path anyhow
                            deletedEntry = true;
                            //throw new GrpcError(
                            //  grpc.status.UNKNOWN,
                            //  `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}`
                            //);
                          }
                        }
                      }
                      return false;
                    }
                  }
                }
              }
              break;
            case "nvme":
              {
                if (
                  await filesystem.deviceIsNVMEoF(block_device_info_i.path)
                ) {
                  let nqn = await nvmeof.nqnByNamespaceDeviceName(
                    block_device_info_i.name
                  );
            }

              if (is_attached_to_session) {
                let timer_start;
                let timer_max;

                timer_start = Math.round(new Date().getTime() / 1000);
                timer_max = 30;
                let loggedOut = false;
                while (!loggedOut) {
                  try {
                    await iscsi.iscsiadm.logout(session.target, [
                      session.persistent_portal,
                    ]);
                    loggedOut = true;
                  } catch (err) {
                    await GeneralUtils.sleep(2000);
                    let current_time = Math.round(
                      new Date().getTime() / 1000
                    );
                    if (current_time - timer_start > timer_max) {
                      // not throwing error for now as future invocations would not enter code path anyhow
                      loggedOut = true;
                      //throw new GrpcError(
                      //  grpc.status.UNKNOWN,
                      //  `hit timeout trying to logout of iscsi target: ${session.persistent_portal}`
                      //);
                    }
                  }
                }

                timer_start = Math.round(new Date().getTime() / 1000);
                timer_max = 30;
                let deletedEntry = false;
                while (!deletedEntry) {
                  try {
                    await iscsi.iscsiadm.deleteNodeDBEntry(
                      session.target,
                      session.persistent_portal
                    );
                    deletedEntry = true;
                  } catch (err) {
                    await GeneralUtils.sleep(2000);
                    let current_time = Math.round(
                      new Date().getTime() / 1000
                    );
                    if (current_time - timer_start > timer_max) {
                      // not throwing error for now as future invocations would not enter code path anyhow
                      deletedEntry = true;
                      //throw new GrpcError(
                      //  grpc.status.UNKNOWN,
                      //  `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}`
                      //);
                    }
                  }
                  if (nqn) {
                    await nvmeof.disconnectByNQN(nqn);
                    /**
                     * the above disconnects *all* devices with the nqn so we
                     * do NOT want to keep iterating all the 'real' devices
                     * in the case of DM multipath
                     */
                    breakdeviceloop = true;
                  }
                }
              }
              break;
            }
          }
        }

@@ -2539,6 +2841,7 @@ class CsiBaseDriver {
        case "oneclient":
        case "hostpath":
        case "iscsi":
        case "nvmeof":
        case "zfs-local":
          // ensure appropriate directories/files
          switch (access_type) {

@@ -3205,6 +3508,8 @@
    const driver = this;
    const mount = driver.getDefaultMountInstance();
    const filesystem = driver.getDefaultFilesystemInstance();
    const nvmeof = driver.getDefaultNVMEoFInstance();

    let device;
    let fs_info;
    let device_path;

@@ -3267,6 +3572,14 @@ class CsiBaseDriver {
          rescan_devices.push(device);

          for (let sdevice of rescan_devices) {
            let is_nvmeof = await filesystem.deviceIsNVMEoF(sdevice);
            if (is_nvmeof) {
              let controllers =
                await nvmeof.getControllersByNamespaceDeviceName(sdevice);
              for (let controller of controllers) {
                await nvmeof.rescanNamespace(`/dev/${controller.Controller}`);
              }
            }
            // TODO: technically rescan is only relevant/available for remote drives
            // such as iscsi etc, should probably limit this call as appropriate
            // for now crudely checking the scenario inside the method itself

@@ -129,6 +129,7 @@ class NodeManualDriver extends CsiBaseDriver {
        driverResourceType = "filesystem";
        break;
      case "iscsi":
      case "nvmeof":
        driverResourceType = "volume";
        fs_types = ["btrfs", "ext3", "ext4", "ext4dev", "xfs"];
        break;

@@ -164,6 +165,14 @@ class NodeManualDriver extends CsiBaseDriver {
          "MULTI_NODE_MULTI_WRITER",
        ];
      }

      if (
        capability.access_type == "block" &&
        !access_modes.includes("MULTI_NODE_MULTI_WRITER")
      ) {
        access_modes.push("MULTI_NODE_MULTI_WRITER");
      }

      if (capability.access_type != "mount") {
        message = `invalid access_type ${capability.access_type}`;
        return false;

@@ -195,6 +204,14 @@ class NodeManualDriver extends CsiBaseDriver {
        "MULTI_NODE_SINGLE_WRITER",
      ];
    }

    if (
      capability.access_type == "block" &&
      !access_modes.includes("MULTI_NODE_MULTI_WRITER")
    ) {
      access_modes.push("MULTI_NODE_MULTI_WRITER");
    }

    if (capability.access_type == "mount") {
      if (
        capability.mount.fs_type &&

@@ -4,7 +4,7 @@ const { GrpcError, grpc } = require("../../utils/grpc");
const { Filesystem } = require("../../utils/filesystem");
const registry = require("../../utils/registry");
const semver = require("semver");
const SshClient = require("../../utils/ssh").SshClient;
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");

// zfs common properties

@@ -504,7 +504,8 @@ class Filesystem {
   * lsblk
   * blkid
   */
  const strategy = process.env.FILESYSTEM_TYPE_DETECTION_STRATEGY || "lsblk";
  const strategy =
    process.env.FILESYSTEM_TYPE_DETECTION_STRATEGY || "lsblk";

  switch (strategy) {
    // requires udev data to be present otherwise fstype property is always null but otherwise succeeds

@@ -547,6 +548,21 @@ class Filesystem {
    return result && result.tran == "iscsi";
  }

  async deviceIsNVMEoF(device) {
    const filesystem = this;
    let result;

    do {
      if (result) {
        device = `/dev/${result.pkname}`;
      }
      result = await filesystem.getBlockDevice(device);
    } while (result.pkname);

    // TODO: add further logic here to ensure the device is not a local pcie/etc device
    return result && result.tran == "nvme";
  }
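
  // Example (sketch) of the pkname walk above: for a dm-multipath stack
  //   /dev/dm-0 -> parent /dev/nvme0n1 -> tran == "nvme"  => true
  // while a SATA-backed /dev/sda1 resolves to tran == "sata" => false.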

  async getBlockDeviceParent(device) {
    const filesystem = this;
    let result;

@@ -1,6 +1,7 @@
const _ = require("lodash");
const axios = require("axios");
const crypto = require("crypto");
const dns = require("dns");

function sleep(ms) {
  return new Promise((resolve) => {

@@ -8,6 +9,17 @@ function sleep(ms) {
  });
}

function trimchar(str, ch) {
  var start = 0,
    end = str.length;

  while (start < end && str[start] === ch) ++start;

  while (end > start && str[end - 1] === ch) --end;

  return start > 0 || end < str.length ? str.substring(start, end) : str;
}
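
// Usage sketch: trimchar() strips one character from both ends, so the fc
// address handling in the nvmeof transport parser needs one call per bracket
// (the address value is hypothetical):
//   trimchar("[0x10000090fa0b1234]", "[")   // => "0x10000090fa0b1234]"
//   trimchar(trimchar("[0x10000090fa0b1234]", "["), "]") // => "0x10000090fa0b1234"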

function md5(val) {
  return crypto.createHash("md5").update(val).digest("hex");
}

@@ -87,10 +99,10 @@ function lockKeysFromRequest(call, serviceMethodName) {
    case "NodeUnstageVolume":
    case "NodePublishVolume":
    case "NodeUnpublishVolume":
    case "NodeGetVolumeStats":
    case "NodeExpandVolume":
      return ["volume_id_" + call.request.volume_id];

    case "NodeGetVolumeStats":
    default:
      return [];
  }

@@ -250,6 +262,18 @@ async function retry(retries, retriesDelay, code, options = {}) {
  } while (true);
}

async function hostname_lookup(hostname) {
  return new Promise((resolve, reject) => {
    dns.lookup(hostname, function (err, result) {
      if (err) {
        return reject(err);
      }

      return resolve(result);
    });
  });
}
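
// Usage sketch: promisified dns.lookup, used by the nvmeof transport parser
// to pin a hostname to the ip the kernel will report:
//   const ip = await hostname_lookup("localhost"); // typically "127.0.0.1"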

module.exports.sleep = sleep;
module.exports.md5 = md5;
module.exports.crc32 = crc32;

@@ -265,3 +289,5 @@ module.exports.default_supported_block_filesystems =
module.exports.default_supported_file_filesystems =
  default_supported_file_filesystems;
module.exports.retry = retry;
module.exports.trimchar = trimchar;
module.exports.hostname_lookup = hostname_lookup;

@@ -0,0 +1,554 @@
const cp = require("child_process");
const { hostname_lookup, trimchar } = require("./general");
const URI = require("uri-js");
const querystring = require("querystring");

const DEFAULT_TIMEOUT = process.env.NVMEOF_DEFAULT_TIMEOUT || 30000;

class NVMEoF {
  constructor(options = {}) {
    const nvmeof = this;
    nvmeof.options = options;

    options.paths = options.paths || {};
    if (!options.paths.nvme) {
      options.paths.nvme = "nvme";
    }

    if (!options.paths.sudo) {
      options.paths.sudo = "/usr/bin/sudo";
    }

    if (!options.executor) {
      options.executor = {
        spawn: cp.spawn,
      };
    }

    if (nvmeof.options.logger) {
      nvmeof.logger = nvmeof.options.logger;
    } else {
      nvmeof.logger = console;
    }
  }

  /**
   * List all NVMe devices and namespaces on machine
   *
   * @param {*} args
   */
  async list(args = []) {
    const nvmeof = this;
    args.unshift("list", "-o", "json");
    let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
    return result.parsed;
  }

  /**
   * List nvme subsystems
   *
   * @param {*} args
   */
  async listSubsys(args = []) {
    const nvmeof = this;
    args.unshift("list-subsys", "-o", "json");
    let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
    return result.parsed;
  }

  /**
   * Discover NVMeoF subsystems
   *
   * @param {*} transport
   * @param {*} args
   * @returns
   */
  async discover(transport, args = []) {
    const nvmeof = this;
    transport = await nvmeof.parseTransport(transport);

    let transport_args = [];
    if (transport.type) {
      transport_args.push("--transport", transport.type);
    }
    if (transport.address) {
      transport_args.push("--traddr", transport.address);
    }
    if (transport.service) {
      transport_args.push("--trsvcid", transport.service);
    }

    args.unshift("discover", "-o", "json", ...transport_args);
    let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
    return result.parsed;
  }

  /**
   * Connect to NVMeoF subsystem
   *
   * @param {*} nqn
   * @param {*} transport
   * @param {*} args
   */
  async connectByNQNTransport(nqn, transport, args = []) {
    const nvmeof = this;
    transport = await nvmeof.parseTransport(transport);

    let transport_args = [];
    if (transport.type) {
      transport_args.push("--transport", transport.type);
    }
    if (transport.address) {
      transport_args.push("--traddr", transport.address);
    }
    if (transport.service) {
      transport_args.push("--trsvcid", transport.service);
    }

    if (transport.args) {
      for (let arg in transport.args) {
        let value = transport.args[arg];
        if (!arg.startsWith("-")) {
          arg = `--${arg}`;
        }

        transport_args.push(arg, value);
      }
    }

    args.unshift("connect", "--nqn", nqn, ...transport_args);

    try {
      await nvmeof.exec(nvmeof.options.paths.nvme, args);
    } catch (err) {
      if (
        err.stderr &&
        (err.stderr.includes("already connnected") ||
          err.stderr.includes("Operation already in progress"))
      ) {
        // idempotent
      } else {
        throw err;
      }
    }
  }

  /**
   * Disconnect from NVMeoF subsystem by NQN
   *
   * @param {*} nqn
   * @param {*} args
   */
  async disconnectByNQN(nqn, args = []) {
    const nvmeof = this;
    args.unshift("disconnect", "--nqn", nqn);
    await nvmeof.exec(nvmeof.options.paths.nvme, args);
  }

  /**
   * Disconnect from NVMeoF subsystem by device path
   *
   * @param {*} device
   * @param {*} args
   */
  async disconnectByDevice(device, args = []) {
    const nvmeof = this;
    args.unshift("disconnect", "--device", device);
    await nvmeof.exec(nvmeof.options.paths.nvme, args);
  }

  /**
   * Rescans the NVME namespaces
   *
   * @param {*} device
   * @param {*} args
   */
  async rescanNamespace(device, args = []) {
    const nvmeof = this;
    args.unshift("ns-rescan", device);
    await nvmeof.exec(nvmeof.options.paths.nvme, args);
  }

  async deviceIsNamespaceDevice(device) {
    const nvmeof = this;
    device = device.replace("/dev/", "");
    const subsystems = await nvmeof.getSubsystems();
    for (let subsystem of subsystems) {
      // check subsystem namespaces
      if (subsystem.Namespaces) {
        for (let namespace of subsystem.Namespaces) {
          if (namespace.NameSpace == device) {
            return true;
          }
        }
      }

      // check controller namespaces
      if (subsystem.Controllers) {
        for (let controller of subsystem.Controllers) {
          if (controller.Namespaces) {
            for (let namespace of controller.Namespaces) {
              if (namespace.NameSpace == device) {
                return true;
              }
            }
          }
        }
      }
    }

    return false;
  }

  async deviceIsControllerDevice(device) {
    const nvmeof = this;
    device = device.replace("/dev/", "");
    const subsystems = await nvmeof.getSubsystems();
    for (let subsystem of subsystems) {
      if (subsystem.Controllers) {
        for (let controller of subsystem.Controllers) {
          if (controller.Controller == device) {
            return true;
          }
        }
      }
    }

    return false;
  }

  async parseTransport(transport) {
    if (typeof transport === "object") {
      return transport;
    }

    transport = transport.trim();
    const parsed = URI.parse(transport);
    let args = querystring.parse(parsed.query);

    let type = parsed.scheme;
    let address = parsed.host;
    let service;
    switch (parsed.scheme) {
      case "fc":
      case "rdma":
      case "tcp":
        type = parsed.scheme;
        break;
      default:
        throw new Error(`unknown nvme transport type: ${parsed.scheme}`);
    }

    switch (type) {
      case "fc":
        address = trimchar(address, "[");
        address = trimchar(address, "]");
        break;
      case "tcp":
        /**
         * kernel stores value as ip, so if address passed as hostname then
         * translate to ip address
         *
         * TODO: this could be brittle
         */
        let lookup = await hostname_lookup(address);
        if (lookup) {
          address = lookup;
        }
        break;
    }

    switch (type) {
      case "rdma":
      case "tcp":
        service = parsed.port;

        if (!service) {
          service = 4420;
        }
        break;
    }

    return {
      type,
      address,
      service,
      args,
    };
  }
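
  // Example (sketch): parsing a tcp transport URI; any query args are passed
  // through to `nvme connect` as flags (host-iface here is a hypothetical
  // extra arg, the host name is a placeholder):
  //   await nvmeof.parseTransport("tcp://server.example.com:4420?host-iface=eth0")
  //   // => { type: "tcp", address: "<resolved ip>", service: 4420,
  //   //      args: { "host-iface": "eth0" } }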

  async nativeMultipathEnabled() {
    const nvmeof = this;
    let result = await nvmeof.exec("cat", [
      "/sys/module/nvme_core/parameters/multipath",
    ]);
    return result.stdout.trim() == "Y";
  }
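
  // Equivalent shell check for the kernel's built-in NVMe multipath (sketch):
  //   cat /sys/module/nvme_core/parameters/multipath   # prints "Y" or "N"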

  async namespaceDevicePathByTransportNQNNamespace(transport, nqn, namespace) {
    const nvmeof = this;
    transport = await nvmeof.parseTransport(transport);
    let nativeMultipathEnabled = await nvmeof.nativeMultipathEnabled();

    if (nativeMultipathEnabled) {
      let subsystem = await nvmeof.getSubsystemByNQN(nqn);
      if (subsystem) {
        for (let i_namespace of subsystem.Namespaces) {
          if (i_namespace.NSID != namespace) {
            continue;
          } else {
            return `/dev/${i_namespace.NameSpace}`;
          }
        }
      }
    } else {
      let controller = await nvmeof.getControllerByTransportNQN(transport, nqn);
      if (controller) {
        for (let i_namespace of controller.Namespaces) {
          if (i_namespace.NSID != namespace) {
            continue;
          } else {
            return `/dev/${i_namespace.NameSpace}`;
          }
        }
      }
    }
  }

  async controllerDevicePathByTransportNQN(transport, nqn) {
    const nvmeof = this;
    transport = await nvmeof.parseTransport(transport);
    let controller = await nvmeof.getControllerByTransportNQN(transport, nqn);
    if (controller) {
      return `/dev/${controller.Controller}`;
    }
  }

  async getSubsystems() {
    const nvmeof = this;
    let result = await nvmeof.list(["-v"]);

    return nvmeof.getNormalizedSubsystems(result);
  }

  /**
   * used to normalize subsystem list/response across different versions of nvme-cli
   *
   * @param {*} result
   * @returns
   */
  async getNormalizedSubsystems(result) {
    let subsystems = [];

    for (let device of result.Devices) {
      if (Array.isArray(device.Subsystems)) {
        subsystems = subsystems.concat(device.Subsystems);
      } else if (device.Subsystem) {
        // nvme-cli 1.x support
        subsystems.push(device);
      }
    }

    return subsystems;
  }

  async getSubsystemByNQN(nqn) {
    const nvmeof = this;
    const subsystems = await nvmeof.getSubsystems();
    for (let subsystem of subsystems) {
      if (subsystem.SubsystemNQN == nqn) {
        return subsystem;
      }
    }

    nvmeof.logger.warn(`failed to find subsystem for nqn: ${nqn}`);
  }

  async getControllersByNamespaceDeviceName(name) {
    const nvmeof = this;
    name = name.replace("/dev/", "");
    let nativeMultipathEnabled = await nvmeof.nativeMultipathEnabled();
    const subsystems = await nvmeof.getSubsystems();

    if (nativeMultipathEnabled) {
      // using per-subsystem namespace
      for (let subsystem of subsystems) {
        if (subsystem.Namespaces) {
          for (let namespace of subsystem.Namespaces) {
            if (namespace.NameSpace == name) {
              return subsystem.Controllers;
            }
          }
        }
      }
    } else {
      // using per-controller namespace
      for (let subsystem of subsystems) {
        if (subsystem.Controllers) {
          for (let controller of subsystem.Controllers) {
            if (controller.Namespaces) {
              for (let namespace of controller.Namespaces) {
                if (namespace.NameSpace == name) {
                  return subsystem.Controllers;
                }
              }
            }
          }
        }
      }
    }

    nvmeof.logger.warn(`failed to find controllers for device: ${name}`);
    return [];
  }

  async getControllerByTransportNQN(transport, nqn) {
    const nvmeof = this;
    transport = await nvmeof.parseTransport(transport);
    let subsystem = await nvmeof.getSubsystemByNQN(nqn);
    if (subsystem) {
      for (let controller of subsystem.Controllers) {
        if (controller.Transport != transport.type) {
          continue;
        }

        let controllerAddress = controller.Address;
        /**
         * For backwards compatibility with older nvme-cli versions (at least < 2.2.1)
         * old: "Address":"traddr=127.0.0.1 trsvcid=4420"
         * new: "Address":"traddr=127.0.0.1,trsvcid=4420"
         */
        controllerAddress = controllerAddress.replace(
          new RegExp(/ ([a-z_]*=)/, "g"),
          ",$1"
        );
        let parts = controllerAddress.split(",");

        let traddr;
        let trsvcid;
        for (let i_part of parts) {
          let i_parts = i_part.split("=");
          switch (i_parts[0].trim()) {
            case "traddr":
              traddr = i_parts[1].trim();
              break;
            case "trsvcid":
              trsvcid = i_parts[1].trim();
              break;
          }
        }

        if (traddr != transport.address) {
          continue;
        }

        if (transport.service && trsvcid != transport.service) {
          continue;
        }

        return controller;
      }
    }

    nvmeof.logger.warn(
      `failed to find controller for transport: ${JSON.stringify(
        transport
      )}, nqn: ${nqn}`
    );
  }

  async nqnByNamespaceDeviceName(name) {
    const nvmeof = this;
    name = name.replace("/dev/", "");
    let nativeMultipathEnabled = await nvmeof.nativeMultipathEnabled();
    const subsystems = await nvmeof.getSubsystems();

    if (nativeMultipathEnabled) {
      // using per-subsystem namespace
      for (let subsystem of subsystems) {
        if (subsystem.Namespaces) {
          for (let namespace of subsystem.Namespaces) {
            if (namespace.NameSpace == name) {
              return subsystem.SubsystemNQN;
            }
          }
        }
      }
    } else {
      // using per-controller namespace
      for (let subsystem of subsystems) {
        if (subsystem.Controllers) {
          for (let controller of subsystem.Controllers) {
            if (controller.Namespaces) {
              for (let namespace of controller.Namespaces) {
                if (namespace.NameSpace == name) {
                  return subsystem.SubsystemNQN;
                }
              }
            }
          }
        }
      }
    }

    nvmeof.logger.warn(`failed to find nqn for device: ${name}`);
  }

  devicePathByModelNumberSerialNumber(modelNumber, serialNumber) {
    modelNumber = modelNumber.replaceAll(" ", "_");
    serialNumber = serialNumber.replaceAll(" ", "_");
    return `/dev/disk/by-id/nvme-${modelNumber}_${serialNumber}`;
  }
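
  // Example (sketch): spaces in model/serial map to underscores in the udev
  // by-id link; both values here are hypothetical:
  //   devicePathByModelNumberSerialNumber("Example Ctrl", "0123 4567")
  //   // => "/dev/disk/by-id/nvme-Example_Ctrl_0123_4567"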

  exec(command, args, options = {}) {
    if (!options.hasOwnProperty("timeout")) {
      options.timeout = DEFAULT_TIMEOUT;
    }

    const nvmeof = this;
    args = args || [];

    if (nvmeof.options.sudo) {
      args.unshift(command);
      command = nvmeof.options.paths.sudo;
    }

    nvmeof.logger.verbose(
      "executing nvmeof command: %s %s",
      command,
      args.join(" ")
    );

    return new Promise((resolve, reject) => {
      const child = nvmeof.options.executor.spawn(command, args, options);

      let stdout = "";
      let stderr = "";

      child.stdout.on("data", function (data) {
        stdout = stdout + data;
      });

      child.stderr.on("data", function (data) {
        stderr = stderr + data;
      });

      child.on("close", function (code) {
        const result = { code, stdout, stderr, timeout: false };
        try {
          result.parsed = JSON.parse(result.stdout);
        } catch (err) {}

        // timeout scenario
        if (code === null) {
          result.timeout = true;
          reject(result);
        }

        if (code) {
          reject(result);
        } else {
          resolve(result);
        }
      });
    });
  }
}

module.exports.NVMEoF = NVMEoF;
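
// Minimal usage sketch (assumes nvme-cli is installed; the require path,
// nqn and host are hypothetical placeholders):
// const { NVMEoF } = require("./utils/nvmeof");
// const nvmeof = new NVMEoF({ logger: console, sudo: true });
// (async () => {
//   await nvmeof.connectByNQNTransport(
//     "nqn.2000-01.com.example:csi:pvc-123",
//     "tcp://server.example.com:4420"
//   );
//   console.log(await nvmeof.getSubsystems());
// })();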

@@ -118,6 +118,9 @@ class Windows {
    // -UseWriteThrough $true
    // cannot have trailing slash nor a path
    // must be \\<server>\<share>
    //
    // https://github.com/kubernetes-csi/csi-driver-smb/issues/219#issuecomment-781952587
    // -Persistent $false
    remotePath = this.uncPathToShare(remotePath);
    command =
      "$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential -RequirePrivacy $true";