Compare commits


No commits in common. "master" and "v1.7.3" have entirely different histories.

92 changed files with 3465 additions and 14366 deletions

View File

@ -16,7 +16,6 @@ if [[ -n "${IMAGE_TAG}" ]]; then
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \
--build-arg OBJECTIVEFS_DOWNLOAD_ID=${OBJECTIVEFS_DOWNLOAD_ID} \
.
else
:

View File

@ -1,5 +1,3 @@
# https://www.truenas.com/software-status/
name: CI
on:
@ -15,23 +13,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.12.1
uses: styfle/cancel-workflow-action@0.6.0
with:
access_token: ${{ github.token }}
build-npm-linux-amd64:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
- uses: actions/checkout@v2
- uses: actions/setup-node@v3
with:
node-version: 20
node-version: 16
- shell: bash
name: npm install
run: |
ci/bin/build.sh
- name: upload build
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: node-modules-linux-amd64
path: node_modules-linux-amd64.tar.gz
@ -40,16 +38,16 @@ jobs:
build-npm-windows-amd64:
runs-on: windows-2022
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
- uses: actions/checkout@v2
- uses: actions/setup-node@v3
with:
node-version: 20
node-version: 16
- shell: pwsh
name: npm install
run: |
ci\bin\build.ps1
- name: upload build
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: node-modules-windows-amd64
path: node_modules-windows-amd64.tar.gz
@ -69,8 +67,8 @@ jobs:
- X64
- csi-sanity-synology
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -99,8 +97,8 @@ jobs:
- X64
- csi-sanity-synology
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -115,27 +113,27 @@ jobs:
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
csi-sanity-truenas-scale-24_04:
# api-based drivers
csi-sanity-truenas-scale-22_02:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- truenas/scale/24.04/scale-iscsi.yaml
- truenas/scale/24.04/scale-nfs.yaml
- truenas/scale/22.02/scale-iscsi.yaml
- truenas/scale/22.02/scale-nfs.yaml
# 80 char limit
- truenas/scale/24.04/scale-smb.yaml
- truenas/scale/22.02/scale-smb.yaml
runs-on:
- self-hosted
- Linux
- X64
#- csi-sanity-truenas
- csi-sanity-zfs-generic
- csi-sanity-truenas
#- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -144,7 +142,73 @@ jobs:
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_24_04_HOST }}
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_22_02_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
csi-sanity-truenas-scale-22_12:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- truenas/scale/22.12/scale-iscsi.yaml
- truenas/scale/22.12/scale-nfs.yaml
# 80 char limit
- truenas/scale/22.12/scale-smb.yaml
runs-on:
- self-hosted
- Linux
- X64
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_22_12_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
# ssh-based drivers
csi-sanity-truenas-core-12_0:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
# 63 char limit
- truenas/core/12.0/core-iscsi.yaml
- truenas/core/12.0/core-nfs.yaml
# 80 char limit
- truenas/core/12.0/core-smb.yaml
runs-on:
- self-hosted
- Linux
- X64
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_CORE_12_0_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
@ -154,7 +218,6 @@ jobs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- truenas/core/13.0/core-iscsi.yaml
@ -168,8 +231,8 @@ jobs:
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -188,21 +251,19 @@ jobs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- zfs-generic/iscsi.yaml
- zfs-generic/nfs.yaml
- zfs-generic/smb.yaml
- zfs-generic/nvmeof.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -215,45 +276,6 @@ jobs:
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
# client drivers
csi-sanity-objectivefs:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- objectivefs/objectivefs.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
OBJECTIVEFS_POOL: ${{ secrets.SANITY_OBJECTIVEFS_POOL }}
OBJECTIVEFS_LICENSE: ${{ secrets.SANITY_OBJECTIVEFS_LICENSE }}
OBJECTIVEFS_OBJECTSTORE: ${{ secrets.SANITY_OBJECTIVEFS_OBJECTSTORE }}
OBJECTIVEFS_ENDPOINT_PROTOCOL: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PROTOCOL }}
OBJECTIVEFS_ENDPOINT_HOST: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_HOST }}
OBJECTIVEFS_ENDPOINT_PORT: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PORT }}
OBJECTIVEFS_SECRET_KEY: ${{ secrets.SANITY_OBJECTIVEFS_SECRET_KEY }}
OBJECTIVEFS_ACCESS_KEY: ${{ secrets.SANITY_OBJECTIVEFS_ACCESS_KEY }}
OBJECTIVEFS_PASSPHRASE: ${{ secrets.SANITY_OBJECTIVEFS_PASSPHRASE }}
# these secrets need to match the above secrets for staging/etc
CSI_SANITY_SECRETS: /root/csi-secrets/objectivefs-secrets.yaml
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
# client drivers
csi-sanity-client:
needs:
@ -270,8 +292,8 @@ jobs:
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -298,8 +320,8 @@ jobs:
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-windows-amd64
- name: csi-sanity
@ -328,8 +350,8 @@ jobs:
- X64
- csi-sanity-zfs-local
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -367,8 +389,8 @@ jobs:
- X64
- csi-sanity-local-hostpath
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: ${{ matrix.npmartifact }}
- name: csi-sanity
@ -391,8 +413,8 @@ jobs:
- Windows
- X64
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v2
with:
name: node-modules-windows-amd64
- name: csi-sanity
@ -435,10 +457,10 @@ jobs:
- determine-image-tag
- csi-sanity-synology-dsm6
- csi-sanity-synology-dsm7
- csi-sanity-truenas-scale-24_04
- csi-sanity-truenas-scale-22_02
- csi-sanity-truenas-core-12_0
- csi-sanity-truenas-core-13_0
- csi-sanity-zfs-generic
- csi-sanity-objectivefs
- csi-sanity-client
- csi-sanity-client-windows
- csi-sanity-zfs-local
@ -446,7 +468,7 @@ jobs:
- csi-sanity-windows-node
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
- name: docker build
run: |
export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
@ -466,7 +488,6 @@ jobs:
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
OBJECTIVEFS_DOWNLOAD_ID: ${{ secrets.OBJECTIVEFS_DOWNLOAD_ID }}
DOCKER_CLI_EXPERIMENTAL: enabled
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}
@ -475,10 +496,10 @@ jobs:
needs:
- csi-sanity-synology-dsm6
- csi-sanity-synology-dsm7
- csi-sanity-truenas-scale-24_04
- csi-sanity-truenas-scale-22_02
- csi-sanity-truenas-core-12_0
- csi-sanity-truenas-core-13_0
- csi-sanity-zfs-generic
- csi-sanity-objectivefs
- csi-sanity-client
- csi-sanity-client-windows
- csi-sanity-zfs-local
@ -498,7 +519,7 @@ jobs:
nano_base_tag: ltsc2022
file: Dockerfile.Windows
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v2
- name: docker build
shell: bash
run: |
@ -510,7 +531,7 @@ jobs:
docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
- name: upload image tar
uses: actions/upload-artifact@v4
uses: actions/upload-artifact@v2
with:
name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
@ -525,11 +546,11 @@ jobs:
- self-hosted
- buildah
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
- uses: actions/checkout@v2
- uses: actions/download-artifact@v3
with:
name: democratic-csi-windows-ltsc2019.tar
- uses: actions/download-artifact@v4
- uses: actions/download-artifact@v3
with:
name: democratic-csi-windows-ltsc2022.tar
- name: push windows images with buildah

View File

@ -1,114 +1,3 @@
# v1.9.4
Released 2024-07-06
- minor doc updates
# v1.9.3
Released 2024-06-01
- minor fixes for objectivefs and iscsi
# v1.9.2
Released 2024-05-23
- minor fixes for objectivefs and iscsi
# v1.9.1
Released 2024-05-06
- fix iscsi hostname lookup regression (#393)
- fix resize issue (#390)
- fix Probe issue (#385)
# v1.9.0
Released 2024-03-26
- new `objectivefs` driver (https://objectivefs.com) support available for x86_64 and arm64
- TrueNAS
- SCALE 24.04 support
- fix `sudo` issue during resize operations (see #295)
- fix version detection logic and default to api version 2 (see #351)
- more robust `Probe` implementation
- container images
- various fixes, improvements, dep upgrades, etc
- update container images to `debian:12` (bookworm)
- bump to nodejs-lts-iron from nodejs-lts-hydrogen
- support csi v1.6.0-v1.9.0
- allow `noop` delete operations (dangerous, only use if you _really_ know what you are doing, see #289)
- properly adhere to the `zvolDedup` and `zvolCompression` settings (see #322)
- `restic` and `kopia` support as a snapshot solution for `local-hostpath` and `*-client` drivers
# v1.8.4
Released 2023-11-09
- allow templatized `volume_id` (dangerous, only use if you _really_ know what you are doing)
- fix TrueNAS SCALE iscsi resize issue
- TrueNAS SCALE 23.10 support
- minor improvements/fixes throughout
- dependency updates
# v1.8.3
Released 2023-04-05
- fix invalid `access_mode` logic (see #287)
# v1.8.2
Released 2023-04-02
- more comprehensive support to manually set `access_modes`
- more intelligent handling of `access_modes` when `access_type=block`
- https://github.com/ceph/ceph-csi/blob/devel/examples/README.md#how-to-test-rbd-multi_node_multi_writer-block-feature
- others? allow this by default
- remove older versions of TrueNAS from ci
# v1.8.1
Released 2023-02-25
- minor fixes
- updated `nvmeof` docs
# v1.8.0
Released 2023-02-23
- `nvmeof` support
# v1.7.7
Released 2022-10-17
- support `csi.access_modes` config value in all zfs-based drivers
- bump deps
# v1.7.6
Released 2022-08-06
- support for `talos.dev` clusters
- dep bumps
# v1.7.5
Released 2022-08-02
- improved ipv6 iscsi support
- allow using `blkid` for filesystem detection on block devices
# v1.7.4
Released 2022-07-29
- improved ipv6 iscsi support
# v1.7.3
Released 2022-07-28

View File

@ -1,4 +1,4 @@
FROM debian:12-slim AS build
FROM debian:11-slim AS build
#FROM --platform=$BUILDPLATFORM debian:10-slim AS build
ENV DEBIAN_FRONTEND=noninteractive
@ -9,14 +9,14 @@ ARG BUILDPLATFORM
RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM"
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8
ENV NODE_VERSION=v20.11.1
ENV NODE_VERSION=v16.15.1
ENV NODE_ENV=production
# install build deps
RUN apt-get update && apt-get install -y python3 make cmake gcc g++
RUN apt-get update && apt-get install -y python make cmake gcc g++
# install node
RUN apt-get update && apt-get install -y wget xz-utils
@ -26,8 +26,8 @@ ENV PATH=/usr/local/lib/nodejs/bin:$PATH
# Run as a non-root user
RUN useradd --create-home csi \
&& mkdir /home/csi/app \
&& chown -R csi: /home/csi
WORKDIR /home/csi/app
USER csi
@ -40,33 +40,31 @@ RUN rm -rf docker
######################
# actual image
######################
FROM debian:12-slim
FROM debian:11-slim
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.licenses MIT
ENV DEBIAN_FRONTEND=noninteractive
ENV DEMOCRATIC_CSI_IS_CONTAINER=true
ARG TARGETPLATFORM
ARG BUILDPLATFORM
ARG OBJECTIVEFS_DOWNLOAD_ID
RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM"
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8
ENV NODE_ENV=production
# Workaround for https://github.com/nodejs/node/issues/37219
RUN test $(uname -m) != armv7l || ( \
apt-get update \
&& apt-get install -y libatomic1 \
&& rm -rf /var/lib/apt/lists/* \
)
# install node
#ENV PATH=/usr/local/lib/nodejs/bin:$PATH
@ -77,31 +75,14 @@ COPY --from=build /usr/local/lib/nodejs/bin/node /usr/local/bin/node
# netbase is required by rpcbind/rpcinfo to work properly
# /etc/{services,rpc} are required
RUN apt-get update && \
apt-get install -y wget netbase zip bzip2 socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli fuse3 && \
rm -rf /var/lib/apt/lists/*
ARG RCLONE_VERSION=1.66.0
ADD docker/rclone-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/rclone-installer.sh && rclone-installer.sh
ARG RESTIC_VERSION=0.16.4
ADD docker/restic-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/restic-installer.sh && restic-installer.sh
ARG KOPIA_VERSION=0.16.1
ADD docker/kopia-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/kopia-installer.sh && kopia-installer.sh
apt-get install -y netbase socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync && \
rm -rf /var/lib/apt/lists/*
# controller requirements
#RUN apt-get update && \
# apt-get install -y ansible && \
# rm -rf /var/lib/apt/lists/*
# install objectivefs
ARG OBJECTIVEFS_VERSION=7.2
ADD docker/objectivefs-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/objectivefs-installer.sh && objectivefs-installer.sh
# install wrappers
ADD docker/iscsiadm /usr/local/sbin
RUN chmod +x /usr/local/sbin/iscsiadm
@ -126,7 +107,7 @@ RUN chmod +x /usr/local/bin/oneclient
# Run as a non-root user
RUN useradd --create-home csi \
&& chown -R csi: /home/csi
COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app

View File

@ -57,7 +57,7 @@ RUN @( \
gpg --keyserver hkps://keys.openpgp.org --recv-keys $_ ; \
}
ENV NODE_VERSION 16.18.0
ENV NODE_VERSION 16.15.1
RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/SHASUMS256.txt.asc' -f $env:NODE_VERSION) -OutFile 'SHASUMS256.txt.asc' -UseBasicParsing ;
#RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/SHASUMS256.txt.asc' -f $env:NODE_VERSION) -OutFile 'SHASUMS256.txt.asc' -UseBasicParsing ; \

292
README.md
View File

@ -1,6 +1,5 @@
![Image](https://img.shields.io/docker/pulls/democraticcsi/democratic-csi.svg)
![Image](https://img.shields.io/github/actions/workflow/status/democratic-csi/democratic-csi/main.yml?branch=master&style=flat-square)
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/democratic-csi)](https://artifacthub.io/packages/search?repo=democratic-csi)
![Image](https://img.shields.io/github/workflow/status/democratic-csi/democratic-csi/CI?style=flat-square)
# Introduction
@ -24,13 +23,10 @@ have access to resizing, snapshots, clones, etc functionality.
- `freenas-api-smb` experimental use with SCALE only (manages zfs datasets to share over smb)
- `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-smb` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-nvmeof` (works with any ZoL installation...ie: Ubuntu)
- `zfs-local-ephemeral-inline` (provisions node-local zfs datasets)
- `zfs-local-dataset` (provision node-local volume as dataset)
- `zfs-local-zvol` (provision node-local volume as zvol)
- `synology-iscsi` experimental (manages volumes to share over iscsi)
- `objectivefs` (manages objectivefs volumes)
- `lustre-client` (crudely provisions storage using a shared lustre
share/directory for all volumes)
- `nfs-client` (crudely provisions storage using a shared nfs share/directory
@ -39,8 +35,7 @@ have access to resizing, snapshots, clones, etc functionality.
for all volumes)
- `local-hostpath` (crudely provisions node-local directories)
- `node-manual` (allows connecting to manually created smb, nfs, lustre,
oneclient, nvmeof, and iscsi volumes, see sample PVs in the `examples`
directory)
oneclient, and iscsi volumes, see sample PVs in the `examples` directory)
- framework for developing `csi` drivers
If you have any interest in providing a `csi` driver, simply open an issue to
@ -64,7 +59,6 @@ Predominantly 3 things are needed:
from `nfs-client-provisioner` to `democratic-csi`)
- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc
(migrating between storage classes using `velero`)
- https://github.com/fenio/k8s-truenas (NFS/iSCSI over API with TrueNAS Scale)
## Node Prep
@ -72,21 +66,21 @@ You should install/configure the requirements for both nfs and iscsi.
### cifs
```bash
# RHEL / CentOS
sudo yum install -y cifs-utils
# Ubuntu / Debian
sudo apt-get install -y cifs-utils
```
### nfs
```bash
# RHEL / CentOS
sudo yum install -y nfs-utils
# Ubuntu / Debian
sudo apt-get install -y nfs-common
```
@ -99,9 +93,9 @@ If you are running Kubernetes with rancher/rke please see the following:
- https://github.com/rancher/rke/issues/1846
#### RHEL / CentOS
```bash
# Install the following system packages
sudo yum install -y lsscsi iscsi-initiator-utils sg3_utils device-mapper-multipath
@ -115,11 +109,10 @@ sudo systemctl start iscsid multipathd
# Start and enable iscsi
sudo systemctl enable iscsi
sudo systemctl start iscsi
```
#### Ubuntu / Debian
```bash
# Install the following system packages
sudo apt-get install -y open-iscsi lsscsi sg3-utils multipath-tools scsitools
@ -141,93 +134,14 @@ sudo service open-iscsi start
sudo systemctl status open-iscsi
```
#### [Talos](https://www.talos.dev/)
To use iscsi storage in a talos kubernetes cluster, the following steps are needed; they are similar to the ones explained in https://www.talos.dev/v1.1/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/#patching-the-jiva-installation
##### Patch nodes
since talos does not have iscsi support by default, the iscsi extension is needed
create a `patch.yaml` file with
```yaml
- op: add
path: /machine/install/extensions
value:
- image: ghcr.io/siderolabs/iscsi-tools:v0.1.1
```
and apply the patch across all of your nodes
```bash
talosctl -e <endpoint ip/hostname> -n <node ip/hostname> patch mc -p @patch.yaml
```
the extension will not activate until you "upgrade" the nodes; even if there is no update, use the latest version of the talos installer.
VERIFY THE TALOS VERSION IN THIS COMMAND BEFORE RUNNING IT AND READ THE [OpenEBS Jiva](https://www.talos.dev/v1.1/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/#patching-the-jiva-installation) guide.
upgrade all of the nodes in the cluster to get the extension
```bash
talosctl -e <endpoint ip/hostname> -n <node ip/hostname> upgrade --image=ghcr.io/siderolabs/installer:v1.1.1
```
in your `values.yaml` file make sure to enable these settings
```yaml
node:
hostPID: true
driver:
extraEnv:
- name: ISCSIADM_HOST_STRATEGY
value: nsenter
- name: ISCSIADM_HOST_PATH
value: /usr/local/sbin/iscsiadm
iscsiDirHostPath: /usr/local/etc/iscsi
iscsiDirHostPathType: ""
```
and continue your democratic-csi installation as usual with other iscsi drivers.
#### Privileged Namespace
democratic-csi requires privileged access to the nodes, so the namespace should allow for privileged pods. One way of doing it is via [namespace labels](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/).
Add the following label to the democratic-csi installation namespace: `pod-security.kubernetes.io/enforce=privileged`
```
kubectl label --overwrite namespace democratic-csi pod-security.kubernetes.io/enforce=privileged
```
### freenas-smb
If using with Windows based machines you may need to enable guest access (even
if you are connecting with credentials)
```
Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters AllowInsecureGuestAuth -Value 1
Restart-Service LanmanWorkstation -Force
```
### nvmeof
```bash
# not required but likely helpful (tools are included in the democratic images
# so not needed on the host)
apt-get install -y nvme-cli
# get the nvme fabric modules
apt-get install linux-generic
# ensure the nvmeof modules get loaded at boot
cat <<EOF > /etc/modules-load.d/nvme.conf
nvme
nvme-tcp
nvme-fc
nvme-rdma
EOF
# load the modules immediately
modprobe nvme
modprobe nvme-tcp
modprobe nvme-fc
modprobe nvme-rdma
# nvme has native multipath or can use DM multipath
# democratic-csi will gracefully handle either configuration
# RedHat recommends DM multipath (nvme_core.multipath=N)
cat /sys/module/nvme_core/parameters/multipath
# kernel arg to enable/disable native multipath
nvme_core.multipath=N
```
### zfs-local-ephemeral-inline
@ -282,43 +196,17 @@ linux nodes as well (using the `ntfs3` driver) so volumes created can be
utilized by nodes with either operating system (in the case of `cifs` by both
simultaneously).
If using any `-iscsi` driver be sure your iqns are always fully lower-case by
default (https://github.com/PowerShell/PowerShell/issues/17306).
Due to current limits in the kubernetes tooling it is not possible to use the
`local-hostpath` driver but support is implemented in this project and will
work as soon as kubernetes support is available.
```powershell
# ensure all updates are installed
# enable the container feature
Enable-WindowsOptionalFeature -Online -FeatureName Containers All
# install a HostProcess compatible kubernetes
# smb support
# If using with Windows based machines you may need to enable guest access
# (even if you are connecting with credentials)
Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters AllowInsecureGuestAuth -Value 1
Restart-Service LanmanWorkstation -Force
# iscsi
# enable iscsi service and mpio as appropriate
Get-Service -Name MSiSCSI
Set-Service -Name MSiSCSI -StartupType Automatic
Start-Service -Name MSiSCSI
Get-Service -Name MSiSCSI
# mpio
Get-WindowsFeature -Name 'Multipath-IO'
Add-WindowsFeature -Name 'Multipath-IO'
Enable-MSDSMAutomaticClaim -BusType "iSCSI"
Disable-MSDSMAutomaticClaim -BusType "iSCSI"
Get-MSDSMGlobalDefaultLoadBalancePolicy
Set-MSDSMGlobalLoadBalancePolicy -Policy RR
```
- https://kubernetes.io/blog/2021/08/16/windows-hostprocess-containers/
@ -336,9 +224,8 @@ with much older versions as well.
The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be
used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh`
connections and do all operations entirely with the TrueNAS api. With that in
mind, any ssh/shell/etc requirements below can be safely ignored. The minimum
volume size through the api is `1G` so beware that volumes requested with a
smaller size will be increased to `1G`. Also note the following known issues:
mind, any ssh/shell/etc requirements below can be safely ignored. Also note the
following known issues:
- https://jira.ixsystems.com/browse/NAS-111870
- https://github.com/democratic-csi/democratic-csi/issues/112
@ -347,8 +234,6 @@ size small will be increased to `1G`. Also note the following known issues:
Ensure the following services are configured and running:
- ssh (if you use a password for authentication make sure it is allowed)
- https://www.truenas.com/community/threads/ssh-access-ssh-rsa-not-in-pubkeyacceptedalgorithms.101715/
- `PubkeyAcceptedAlgorithms +ssh-rsa`
- ensure `zsh`, `bash`, or `sh` is set as the root shell, `csh` gives false errors due to quoting
- nfs
- iscsi
@ -425,7 +310,7 @@ Issues to review:
- https://jira.ixsystems.com/browse/NAS-108522
- https://jira.ixsystems.com/browse/NAS-107219
### ZoL (zfs-generic-nfs, zfs-generic-iscsi, zfs-generic-smb, zfs-generic-nvmeof)
### ZoL (zfs-generic-nfs, zfs-generic-iscsi, zfs-generic-smb)
Ensure ssh and zfs are installed on the nfs/iscsi server and that you have installed
`targetcli`.
@ -439,7 +324,7 @@ unecessarily:
- https://github.com/democratic-csi/democratic-csi/issues/151 (some notes on
using delegated zfs permissions)
```bash
####### nfs
yum install -y nfs-utils
systemctl enable --now nfs-server.service
@ -460,110 +345,16 @@ useradd -u 1001 -g 1001 -M -N -s /sbin/nologin smbroot
passwd smbroot (optional)
# create smb user and set password
# The pw you will later also need in the client mount options
smbpasswd -L -a smbroot
####### nvmeof
# ensure nvmeof target modules are loaded at startup
cat <<EOF > /etc/modules-load.d/nvmet.conf
nvmet
nvmet-tcp
nvmet-fc
nvmet-rdma
EOF
# load the modules immediately
modprobe nvmet
modprobe nvmet-tcp
modprobe nvmet-fc
modprobe nvmet-rdma
# install nvmetcli and systemd services
git clone git://git.infradead.org/users/hch/nvmetcli.git
cd nvmetcli
## install globally
python3 setup.py install --prefix=/usr
pip install configshell_fb
## install to root home dir
python3 setup.py install --user
pip install configshell_fb --user
# prevent log files from filling up disk
ln -sf /dev/null ~/.nvmetcli/log.txt
ln -sf /dev/null ~/.nvmetcli/history.txt
# install systemd unit and enable/start
## optionally to ensure the config file is loaded before we start
## reading/writing to it add an ExecStartPost= to the unit file
##
## ExecStartPost=/usr/bin/touch /var/run/nvmet-config-loaded
##
## in your driver config set nvmeof.shareStrategyNvmetCli.configIsImportedFilePath=/var/run/nvmet-config-loaded
## which will prevent the driver from making any changes until the configured
## file is present
vi nvmet.service
cp nvmet.service /etc/systemd/system/
mkdir -p /etc/nvmet
systemctl daemon-reload
systemctl enable --now nvmet.service
systemctl status nvmet.service
# create the port(s) configuration manually
echo "
cd /
ls
" | nvmetcli
# do this multiple times altering as appropriate if you have/want multipath
# change the port to 2, 3.. each additional path
# the below example creates a tcp port listening on all IPs on port 4420
echo "
cd /ports
create 1
cd 1
set addr adrfam=ipv4 trtype=tcp traddr=0.0.0.0 trsvcid=4420
saveconfig /etc/nvmet/config.json
" | nvmetcli
# if running TrueNAS SCALE you can skip the above and simply copy
# contrib/scale-nvmet-start.sh to your machine and add it as a startup script
# to launch POSTINIT type COMMAND
# and then create the port(s) as mentioned above
```
### Synology (synology-iscsi)
Ensure the iscsi manager has been installed and is generally set up/configured. DSM 6.3+ is supported.
### objectivefs (objectivefs)
ObjectiveFS requires the use of an _Admin Key_ to properly automate the
lifecycle of filesystems. Each deployment of the driver will point to a single
`pool` (bucket) and create individual `filesystems` within that bucket
on-demand.
Ensure the config value used for `pool` is an existing bucket. Be sure the
bucket is _NOT_ being used in fs mode (ie: the whole bucket is a single fs).
The `democratic-csi` `node` container will host the fuse mount process so
be careful to only upgrade when all relevant workloads have been drained from
the respective node. Also beware that any cpu/memory limits placed on the
container by the orchestration system will impact any ability to use the
caching, etc features of objectivefs.
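As a concrete illustration, a minimal driver config might look like the sketch below, modeled on the CI config that appears later in this diff; every value is a placeholder and `pool` must name an existing bucket:
```yaml
driver: objectivefs
objectivefs:
  # placeholder: an existing bucket dedicated to pool mode
  pool: ofs-csi-pool
  cli:
    sudoEnabled: false
    env:
      OBJECTIVEFS_LICENSE: <license key>
      OBJECTSTORE: s3://
      ENDPOINT: https://s3.example.com:443
      ACCESS_KEY: <access key>
      SECRET_KEY: <secret key>
      OBJECTIVEFS_PASSPHRASE: <passphrase>
```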
- https://objectivefs.com/howto/csi-driver-objectivefs
- https://objectivefs.com/howto/csi-driver-objectivefs-kubernetes-managed
- https://objectivefs.com/howto/objectivefs-admin-key-setup
- https://objectivefs.com/features#filesystem-pool
- https://objectivefs.com/howto/how-to-create-a-filesystem-with-an-existing-empty-bucket
## Helm Installation
```bash
helm repo add democratic-csi https://democratic-csi.github.io/charts/
helm repo update
# helm v2
@ -607,14 +398,13 @@ microk8s helm upgrade \
- microk8s - `/var/snap/microk8s/common/var/lib/kubelet`
- pivotal - `/var/vcap/data/kubelet`
- k0s - `/var/lib/k0s/kubelet`
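If the chart exposes the kubelet path as a value (the democratic-csi chart appears to use `node.kubeletHostPath`, but verify against your chart version), a non-standard path can be set with a values snippet like this sketch:
```yaml
node:
  # hypothetical override using the microk8s path from the list above
  kubeletHostPath: /var/snap/microk8s/common/var/lib/kubelet
```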
### openshift
`democratic-csi` generally works fine with openshift. Some special parameters
need to be set with helm (support added in chart version `0.6.1`):
```bash
# for sure required
--set node.rbac.openshift.privileged=true
--set node.driver.localtimeHostPath=false
@ -628,11 +418,6 @@ need to be set with helm (support added in chart version `0.6.1`):
`democratic-csi` works with Nomad in a functioning but limited capacity. See the
[Nomad docs](docs/nomad.md) for details.
### Docker Swarm
- https://github.com/moby/moby/blob/master/docs/cluster_volumes.md
- https://github.com/olljanat/csi-plugins-for-docker-swarm
## Multiple Deployments
You may install multiple deployments of each/any driver. It requires the
@ -647,19 +432,30 @@ following:
- For `iscsi` and `smb` be aware that the names of assets/shares are _global_
and so collisions are possible/probable. Appropriate use of the respective
`nameTemplate`, `namePrefix`, and `nameSuffix` configuration options will
mitigate the issue [#210](https://github.com/democratic-csi/democratic-csi/issues/210) (see the example below).
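For example, a second iscsi deployment could carve out its own share names in its driver config along these lines (a sketch; the prefix is hypothetical):
```yaml
# driver config fragment for a second, independent iscsi deployment
iscsi:
  namePrefix: "csi-b-"
  nameSuffix: ""
```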
# Snapshot Support
Install snapshot controller (once per cluster):
- https://github.com/democratic-csi/charts/tree/master/stable/snapshot-controller
OR
Install beta (v1.17+) CRDs (once per cluster):
- https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd
```
kubectl apply -f snapshot.storage.k8s.io_volumesnapshotclasses.yaml
kubectl apply -f snapshot.storage.k8s.io_volumesnapshotcontents.yaml
kubectl apply -f snapshot.storage.k8s.io_volumesnapshots.yaml
```
Install snapshot controller (once per cluster):
- https://github.com/kubernetes-csi/external-snapshotter/tree/master/deploy/kubernetes/snapshot-controller
```
# replace namespace references to your liking
kubectl apply -f rbac-snapshot-controller.yaml
kubectl apply -f setup-snapshot-controller.yaml
```
Install `democratic-csi` as usual with `volumeSnapshotClasses` defined as appropriate.
- https://kubernetes.io/docs/concepts/storage/volume-snapshots/
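A minimal `VolumeSnapshotClass` might look like the sketch below; the `driver` value must match the `csi-name` your deployment was launched with (assumed here to be `org.democratic-csi.nfs`, the name used in the Nomad example elsewhere in this diff):
```yaml
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshotClass
metadata:
  # hypothetical name
  name: freenas-nfs-snapshots
driver: org.democratic-csi.nfs
deletionPolicy: Delete
```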
@ -675,6 +471,12 @@ Copy the `contrib/freenas-provisioner-to-democratic-csi.sh` script from the
project to your workstation, read the script in detail, and edit the variables
to your needs to start migrating!
# Sponsors
A special shout out to the wonderful sponsors of the project!
[![ixSystems](https://www.ixsystems.com/wp-content/uploads/2021/06/ix_logo_200x47.png "ixSystems")](http://ixsystems.com/)
# Related
- https://github.com/nmaupu/freenas-provisioner

View File

@ -63,10 +63,6 @@ const args = require("yargs")
"1.3.0",
"1.4.0",
"1.5.0",
"1.6.0",
"1.7.0",
"1.8.0",
"1.9.0",
],
})
.demandOption(["csi-version"], "csi-version is required")
@ -107,7 +103,6 @@ if (!args.serverSocket && !args.serverAddress && !args.serverPort) {
}
//console.log(args);
//console.log(process.env);
const package = require("../package.json");
args.version = package.version;
@ -140,13 +135,10 @@ const csi = protoDescriptor.csi.v1;
logger.info("initializing csi driver: %s", options.driver);
const { Registry } = require("../src/utils/registry");
let globalRegistry = new Registry();
let driver;
try {
driver = require("../src/driver/factory").factory(
{ logger, args, cache, package, csiVersion, registry: globalRegistry },
{ logger, args, cache, package, csiVersion },
options
);
} catch (err) {
@ -405,58 +397,10 @@ logger.info(
bindSocket
);
const signalMapping = {
1: "SIGHUP",
2: "SIGINT",
3: "SIGQUIT",
4: "SIGILL",
5: "SIGTRAP",
6: "SIGABRT",
7: "SIGEMT",
8: "SIGFPE",
9: "SIGKILL",
10: "SIGBUS",
11: "SIGSEGV",
12: "SIGSYS",
13: "SIGPIPE",
14: "SIGALRM",
15: "SIGTERM",
16: "SIGURG",
17: "SIGSTOP",
18: "SIGTSTP",
19: "SIGCONT",
20: "SIGCHLD",
21: "SIGTTIN",
22: "SIGTTOU",
23: "SIGIO",
24: "SIGXCPU",
25: "SIGXFSZ",
26: "SIGVTALRM",
27: "SIGPROF",
28: "SIGWINCH",
29: "SIGINFO",
30: "SIGUSR1",
31: "SIGUSR2",
};
[(`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`)].forEach(
[`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`].forEach(
(eventType) => {
process.on(eventType, async (code) => {
let codeNumber = null;
let codeName = null;
if (code > 0) {
codeNumber = code;
codeName = signalMapping[code];
} else {
codeNumber = Object.keys(signalMapping).find(
(key) => signalMapping[key] === code
);
codeName = code;
}
console.log(
`running server shutdown, exit code: ${codeNumber} (${codeName})`
);
console.log(`running server shutdown, exit code: ${code}`);
// attempt clean shutdown of in-flight requests
try {
@ -487,7 +431,7 @@ const signalMapping = {
}
console.log("server fully shutdown, exiting");
process.exit(codeNumber);
process.exit(code);
});
}
);

View File

@ -127,7 +127,6 @@ async function main() {
for (let csiVolume of csiVolumes) {
let volume_id = csiVolume.volume.volume_id;
let volume_context = JSON.stringify(csiVolume.volume.volume_context) || "Unknown";
//console.log(`processing csi volume ${volume_id}`);
let k8sVolume = k8sVolumes.find((i_k8sVolume) => {
let volume_handle = _.get(i_k8sVolume, "spec.csi.volumeHandle", null);
@ -135,7 +134,7 @@ async function main() {
});
if (!k8sVolume) {
console.log(`volume ${volume_id} (${volume_context}) is NOT in k8s`);
console.log(`volume ${volume_id} is NOT in k8s`);
if (process.env.DRY_RUN == "1") {
continue;
}
@ -160,7 +159,7 @@ async function main() {
console.log(`skipping delete of csi volume ${volume_id}`);
}
} else {
console.log(`volume ${volume_id} (${volume_context}) is in k8s`);
console.log(`volume ${volume_id} is in k8s`);
}
}

View File

@ -19,32 +19,32 @@ if (! $env:CSI_SANITY_FAILFAST) {
$env:CSI_SANITY_FAILFAST = "false"
}
$failfast = ""
if ($env:CSI_SANITY_FAILFAST -eq "true") {
$failfast = "-ginkgo.failFast"
}
Write-Output "launching csi-sanity"
Write-Output "connecting to: ${endpoint}"
Write-Output "failfast: ${env:CSI_SANITY_FAILFAST}"
Write-Output "skip: ${env:CSI_SANITY_SKIP}"
Write-Output "focus: ${env:CSI_SANITY_FOCUS}"
Write-Output "csi.mountdir: ${env:CSI_SANITY_TEMP_DIR}\mnt"
Write-Output "csi.stagingdir: ${env:CSI_SANITY_TEMP_DIR}\stage"
$exe = "csi-sanity.exe"
$exeargs = @()
$exeargs += "-csi.endpoint", "unix://${endpoint}"
$exeargs += "-csi.mountdir", "${env:CSI_SANITY_TEMP_DIR}\mnt"
$exeargs += "-csi.stagingdir", "${env:CSI_SANITY_TEMP_DIR}\stage"
$exeargs += "-csi.testvolumeexpandsize", "2147483648"
$exeargs += "-csi.testvolumesize", "1073741824"
$exeargs += "--csi.secrets", "${env:CSI_SANITY_SECRETS}"
$exeargs += "-ginkgo.skip", "${env:CSI_SANITY_SKIP}"
$exeargs += "-ginkgo.focus", "${env:CSI_SANITY_FOCUS}"
$skip = '"' + ${env:CSI_SANITY_SKIP} + '"'
$focus = '"' + ${env:CSI_SANITY_FOCUS} + '"'
if ($env:CSI_SANITY_FAILFAST -eq "true") {
$exeargs += "-ginkgo.fail-fast"
}
csi-sanity.exe -"csi.endpoint" "unix://${endpoint}" `
$failfast `
-"csi.mountdir" "${env:CSI_SANITY_TEMP_DIR}\mnt" `
-"csi.stagingdir" "${env:CSI_SANITY_TEMP_DIR}\stage" `
-"csi.testvolumeexpandsize" 2147483648 `
-"csi.testvolumesize" 1073741824 `
-"ginkgo.skip" $skip `
-"ginkgo.focus" $focus
Write-Output "csi-sanity command: $exe $($exeargs -join ' ')"
&$exe $exeargs
# does not work the same as linux for some reason
# -"ginkgo.skip" "'" + ${env:CSI_SANITY_SKIP} + "'" `
if (-not $?) {
$exit_code = $LASTEXITCODE

View File

@ -7,7 +7,7 @@ set -x
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
: ${CSI_SANITY_TEMP_DIR:=$(mktemp -d -t ci-csi-sanity-tmp-XXXXXXXX)}
if [[ ! -S "${CSI_ENDPOINT}" ]]; then
echo "csi socket: ${CSI_ENDPOINT} does not exist"
exit 1
fi
@ -15,29 +15,27 @@ fi
trap ctrl_c INT
function ctrl_c() {
echo "Trapped CTRL-C"
exit 1
echo "Trapped CTRL-C"
exit 1
}
chmod g+w,o+w "${CSI_ENDPOINT}"
mkdir -p "${CSI_SANITY_TEMP_DIR}"
rm -rf "${CSI_SANITY_TEMP_DIR}"/*
chmod -R 777 "${CSI_SANITY_TEMP_DIR}"
# https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity
# FOR DEBUG: --ginkgo.v
# --csi.secrets=<path to secrets file>
#
# expand size 2073741824 to have mis-alignments
# expand size 2147483648 to have everything line up nicely
csi-sanity --csi.endpoint "unix://${CSI_ENDPOINT}" \
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
--csi.testvolumeexpandsize 2147483648 \
--csi.testvolumesize 1073741824 \
--csi.secrets="${CSI_SANITY_SECRETS}" \
-ginkgo.skip "${CSI_SANITY_SKIP}" \
-ginkgo.focus "${CSI_SANITY_FOCUS}"
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
--csi.testvolumeexpandsize 2147483648 \
--csi.testvolumesize 1073741824 \
-ginkgo.skip "${CSI_SANITY_SKIP}" \
-ginkgo.focus "${CSI_SANITY_FOCUS}"
rm -rf "${CSI_SANITY_TEMP_DIR}"

View File

@ -8,7 +8,7 @@ Set-Location $env:PWD
Write-Output "launching server"
$env:LOG_LEVEL = "debug"
$env:CSI_VERSION = "1.9.0"
$env:CSI_VERSION = "1.5.0"
$env:CSI_NAME = "driver-test"
$env:CSI_SANITY = "1"

View File

@ -9,19 +9,19 @@ echo "current launch-server PATH: ${PATH}"
: ${CI_BUILD_KEY:="local"}
: ${TEMPLATE_CONFIG_FILE:=${1}}
: ${CSI_MODE:=""}
: ${CSI_VERSION:="1.9.0"}
: ${CSI_VERSION:="1.5.0"}
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
: ${LOG_PATH:=/tmp/csi-${CI_BUILD_KEY}.log}
if [[ "x${CONFIG_FILE}" == "x" ]]; then
if [[ "x${CONFIG_FILE}" == "x" ]];then
: ${CONFIG_FILE:=/tmp/csi-config-${CI_BUILD_KEY}.yaml}
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]]; then
envsubst <"${TEMPLATE_CONFIG_FILE}" >"${CONFIG_FILE}"
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]];then
envsubst < "${TEMPLATE_CONFIG_FILE}" > "${CONFIG_FILE}"
fi
fi
if [[ "x${CSI_MODE}" != "x" ]]; then
if [[ "x${CSI_MODE}" != "x" ]];then
EXTRA_ARGS="--csi-mode ${CSI_MODE} ${EXTRA_ARGS}"
fi

View File

@ -1,20 +0,0 @@
driver: objectivefs
objectivefs:
pool: ${OBJECTIVEFS_POOL}
cli:
sudoEnabled: false
env:
OBJECTIVEFS_LICENSE: ${OBJECTIVEFS_LICENSE}
OBJECTSTORE: ${OBJECTIVEFS_OBJECTSTORE}
ENDPOINT: ${OBJECTIVEFS_ENDPOINT_PROTOCOL}://${OBJECTIVEFS_ENDPOINT_HOST}:${OBJECTIVEFS_ENDPOINT_PORT}
SECRET_KEY: ${OBJECTIVEFS_SECRET_KEY}
ACCESS_KEY: ${OBJECTIVEFS_ACCESS_KEY}
OBJECTIVEFS_PASSPHRASE: ${OBJECTIVEFS_PASSPHRASE}
_private:
csi:
volume:
idHash:
# max volume name length is 63
strategy: crc32

View File

@ -29,10 +29,3 @@ iscsi:
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,38 +0,0 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,29 +0,0 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,50 +0,0 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,38 +0,0 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,29 +0,0 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,50 +0,0 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,30 +0,0 @@
driver: zfs-generic-nvmeof
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
nvmeof:
transports:
- "tcp://${SERVER_HOST}:4420"
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareStrategy: "nvmetCli"
shareStrategyNvmetCli:
basename: "nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664"
ports:
- "1"
subsystem:
attributes:
allow_any_host: 1

View File

@ -43,19 +43,19 @@ if [[ ! -f ${PV_ORIG_FILE} ]]; then
kubectl get pv "${PV}" -o yaml >"${PV_ORIG_FILE}"
fi
reclaimPolicy=$(yq '.spec.persistentVolumeReclaimPolicy' "${PV_ORIG_FILE}")
reclaimPolicy=$(yq eval '.spec.persistentVolumeReclaimPolicy' "${PV_ORIG_FILE}")
# copy file for editing
cp "${PV_ORIG_FILE}" "${PV_TMP_FILE}"
# pre-process before edit
yq -i 'del(.metadata.resourceVersion)' "${PV_TMP_FILE}"
yq -i eval 'del(.metadata.resourceVersion)' "${PV_TMP_FILE}"
# manually edit
${EDITOR} "${PV_TMP_FILE}"
# ask if looks good
yq '.' "${PV_TMP_FILE}"
yq eval '.' "${PV_TMP_FILE}"
yes_or_no "Would you like to delete the existing PV object and recreate with the above data?"
# set reclaim to Retain on PV

View File

@ -1,108 +0,0 @@
#!/bin/bash
# simple script to 'start' nvmet on TrueNAS SCALE
#
# to reinstall nvmetcli simply rm /usr/sbin/nvmetcli
# debug
#set -x
# exit non-zero
set -e
SCRIPTDIR="$(
cd -- "$(dirname "$0")" >/dev/null 2>&1
pwd -P
)"
cd "${SCRIPTDIR}"
: "${NVMETCONFIG:="${SCRIPTDIR}/nvmet-config.json"}"
: "${NVMETVENV:="${SCRIPTDIR}/nvmet-venv"}"
export PATH=${HOME}/.local/bin:${PATH}
main() {
kernel_modules
nvmetcli ls &>/dev/null || {
setup_venv
install_nvmetcli
}
nvmetcli_restore
}
kernel_modules() {
modules=()
modules+=("nvmet")
modules+=("nvmet-fc")
modules+=("nvmet-rdma")
modules+=("nvmet-tcp")
for module in "${modules[@]}"; do
modprobe "${module}"
done
}
setup_venv() {
rm -rf ${NVMETVENV}
python -m venv ${NVMETVENV} --without-pip --system-site-packages
activate_venv
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python get-pip.py
rm get-pip.py
deactivate_venv
}
activate_venv() {
. ${NVMETVENV}/bin/activate
}
deactivate_venv() {
deactivate
}
install_nvmetcli() {
if [[ ! -d nvmetcli ]]; then
git clone git://git.infradead.org/users/hch/nvmetcli.git
fi
cd nvmetcli
activate_venv
# install to root home dir
python3 setup.py install --install-scripts=${HOME}/.local/bin
# install to root home dir
pip install configshell_fb==1.1.30
# remove source
cd "${SCRIPTDIR}"
rm -rf nvmetcli
deactivate_venv
}
nvmetcli_restore() {
activate_venv
cd "${SCRIPTDIR}"
nvmetcli restore "${NVMETCONFIG}"
deactivate_venv
touch /var/run/nvmet-config-loaded
chmod +r /var/run/nvmet-config-loaded
}
main

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,6 +0,0 @@
#!/bin/bash
# v1.6.0
VERSION=${1}
curl -v -o "csi-${VERSION}.proto" https://raw.githubusercontent.com/container-storage-interface/spec/${VERSION}/csi.proto

View File

@ -1,28 +1,5 @@
#!/bin/bash
: "${ISCSIADM_HOST_STRATEGY:=chroot}"
: "${ISCSIADM_HOST_PATH:=iscsiadm}"
# https://engineering.docker.com/2019/07/road-to-containing-iscsi/
echoerr() { printf "%s\n" "$*" >&2; }
case ${ISCSIADM_HOST_STRATEGY} in
chroot)
# https://engineering.docker.com/2019/07/road-to-containing-iscsi/
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" ${ISCSIADM_HOST_PATH} "${@:1}"
;;
nsenter)
# https://github.com/siderolabs/extensions/issues/38#issuecomment-1125403043
iscsid_pid=$(pgrep --exact --oldest iscsid)
if [[ "${iscsid_pid}x" == "x" ]]; then
echoerr "failed to find iscsid pid for nsenter"
exit 1
fi
nsenter --mount="/proc/${iscsid_pid}/ns/mnt" --net="/proc/${iscsid_pid}/ns/net" -- ${ISCSIADM_HOST_PATH} "${@:1}"
;;
*)
echoerr "invalid ISCSIADM_HOST_STRATEGY: ${ISCSIADM_HOST_STRATEGY}"
exit 1
;;
esac
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" iscsiadm "${@:1}"

View File

@ -1,36 +0,0 @@
#!/bin/bash
set -e
set -x
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
PLATFORM=$BUILDPLATFORM
else
PLATFORM=$TARGETPLATFORM
fi
if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64"
fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then
export PLATFORM_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
export PLATFORM_ARCH="arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
export PLATFORM_ARCH="armhf"
else
echo "unsupported/unknown kopia PLATFORM ${PLATFORM}"
exit 0
fi
echo "I am installing kopia $KOPIA_VERSION"
export DEB_FILE="kopia.deb"
wget -O "${DEB_FILE}" "https://github.com/kopia/kopia/releases/download/v${KOPIA_VERSION}/kopia_${KOPIA_VERSION}_linux_${PLATFORM_ARCH}.deb"
dpkg -i "${DEB_FILE}"
rm "${DEB_FILE}"

View File

@ -21,8 +21,8 @@ while getopts "t:" opt; do
case "$opt" in
t)
if [[ "x${USE_HOST_MOUNT_TOOLS}" == "x" ]]; then
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
fi

View File

@ -1,40 +0,0 @@
#!/bin/bash
set -e
set -x
if [[ -z "${OBJECTIVEFS_DOWNLOAD_ID}" ]]; then
echo 'missing OBJECTIVEFS_DOWNLOAD_ID, moving on'
exit 0
fi
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
PLATFORM=$BUILDPLATFORM
else
PLATFORM=$TARGETPLATFORM
fi
if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64"
fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then
export OBJECTIVEFS_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
export OBJECTIVEFS_ARCH="arm64"
else
echo "unsupported/unknown PLATFORM ${PLATFORM}"
exit 0
fi
export DEB_FILE="objectivefs_${OBJECTIVEFS_VERSION}_${OBJECTIVEFS_ARCH}.deb"
echo "I am installing objectivefs $OBJECTIVEFS_VERSION"
wget "https://objectivefs.com/user/download/${OBJECTIVEFS_DOWNLOAD_ID}/${DEB_FILE}"
dpkg -i "${DEB_FILE}"
rm "${DEB_FILE}"

View File

@ -1,41 +0,0 @@
#!/bin/bash
set -e
set -x
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
PLATFORM=$BUILDPLATFORM
else
PLATFORM=$TARGETPLATFORM
fi
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64"
fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
if [ "$PLATFORM" = "linux/amd64" ]; then
export PLATFORM_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
export PLATFORM_ARCH="arm"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
export PLATFORM_ARCH="arm-v7"
else
echo "unsupported/unknown restic PLATFORM ${PLATFORM}"
exit 0
fi
echo "I am installing rclone $RCLONE_VERSION"
export ZIP_FILE="rclone.zip"
wget -O "${ZIP_FILE}" "https://github.com/rclone/rclone/releases/download/v${RCLONE_VERSION}/rclone-v${RCLONE_VERSION}-linux-${PLATFORM_ARCH}.zip"
unzip "${ZIP_FILE}"
mv rclone-*-linux-*/rclone /usr/local/bin/rclone
rm -rf rclone-*-linux-*
chown root:root /usr/local/bin/rclone
chmod +x /usr/local/bin/rclone

View File

@ -1,42 +0,0 @@
#!/bin/bash
set -e
set -x
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
PLATFORM=$BUILDPLATFORM
else
PLATFORM=$TARGETPLATFORM
fi
if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64"
fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
if [ "$PLATFORM" = "linux/amd64" ]; then
export PLATFORM_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
export PLATFORM_ARCH="arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
export PLATFORM_ARCH="arm"
elif [ "$PLATFORM" = "linux/s390x" ]; then
export PLATFORM_ARCH="s390x"
elif [ "$PLATFORM" = "linux/ppc64le" ]; then
export PLATFORM_ARCH="ppc64le"
else
echo "unsupported/unknown restic PLATFORM ${PLATFORM}"
exit 0
fi
echo "I am installing restic $RESTIC_VERSION"
export TAR_FILE="restic.bz2"
wget -O "${TAR_FILE}" "https://github.com/restic/restic/releases/download/v${RESTIC_VERSION}/restic_${RESTIC_VERSION}_linux_${PLATFORM_ARCH}.bz2"
bunzip2 "${TAR_FILE}"
mv restic /usr/local/bin
chown root:root /usr/local/bin/restic
chmod +x /usr/local/bin/restic

View File

@ -21,8 +21,8 @@ while getopts "t:" opt; do
case "$opt" in
t)
if [[ "x${USE_HOST_MOUNT_TOOLS}" == "x" ]]; then
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
fi

View File

@ -11,10 +11,6 @@ job "democratic-csi-iscsi-node" {
env {
CSI_NODE_ID = "${attr.unique.hostname}"
# if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
# you can configure the fs detection system used with the following envvar:
#FILESYSTEM_TYPE_DETECTION_STRATEGY = "blkid"
}
config {
@ -42,15 +38,6 @@ job "democratic-csi-iscsi-node" {
source = "/"
readonly=false
}
# if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
# you can try uncommenting the following additional mount block:
#mount {
# type = "bind"
# target = "/run/udev"
# source = "/run/udev"
# readonly = true
#}
}
template {

View File

@ -6,44 +6,17 @@ job "democratic-csi-nfs-controller" {
driver = "docker"
config {
image = "docker.io/democraticcsi/democratic-csi:${var.version}"
image = "docker.io/democraticcsi/democratic-csi:latest"
entrypoint = [
"${NOMAD_TASK_DIR}/init.sh"
]
args = [
"--csi-version=1.5.0",
# must match the csi_plugin.id attribute below
"--csi-name=org.democratic-csi.nfs",
"--driver-config-file=${NOMAD_TASK_DIR}/driver-config-file.yaml",
"--log-level=info",
"--csi-mode=controller",
"--server-socket=/csi/csi.sock",
]
network_mode = "host"
privileged = true
}
env {
NFS_SERVER = "<nfs server>"
NFS_SHARE = "<nfs share>"
}
# The nfs share is mounted in the controller so it can create the volume
# sub-directories inside the nfs share
template {
destination = "${NOMAD_TASK_DIR}/init.sh"
perms = "755"
data = <<-EOT
#!/bin/sh
if [ ! -d /storage ]; then
mkdir -p /storage
fi
mount "{{ env "NFS_SERVER" }}:{{ env "NFS_SHARE" }}" /storage
exec ./bin/democratic-csi \
--csi-version=1.5.0 \
--csi-name=org.democratic-csi.nfs \
--driver-config-file={{ env "NOMAD_TASK_DIR" }}/driver-config-file.yaml \
--log-level=info \
--csi-mode=controller \
--server-socket=/csi/csi.sock
EOT
}
template {

View File

@ -1,6 +0,0 @@
# common options for the controller service
csi:
# manual override of the available access modes for the deployment
# generally highly unnecessary to alter so only use in advanced scenarios
#access_modes: []

View File

@ -42,7 +42,6 @@ zfs:
datasetParentName: tank/k8s/b/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/b/snaps
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
@ -68,8 +67,6 @@ iscsi:
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
# https://github.com/democratic-csi/democratic-csi/issues/302
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
- targetGroupPortalGroup: 1
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1

View File

@ -37,7 +37,6 @@ zfs:
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false

View File

@ -42,7 +42,6 @@ zfs:
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false

View File

@ -51,7 +51,6 @@ zfs:
datasetParentName: tank/k8s/b/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/b/snaps
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
@ -77,8 +76,6 @@ iscsi:
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
# https://github.com/democratic-csi/democratic-csi/issues/302
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
- targetGroupPortalGroup: 1
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1

View File

@ -47,7 +47,6 @@ zfs:
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false

View File

@ -53,7 +53,6 @@ zfs:
datasetParentName: tank/k8s/a/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
datasetEnableQuotas: true
datasetEnableReservation: false

View File

@ -3,57 +3,8 @@ instance_id:
local-hostpath:
# generally shareBasePath and controllerBasePath should be the same for this
# driver, this path should be mounted into the csi-driver container
shareBasePath: "/var/lib/csi-local-hostpath"
shareBasePath: "/var/lib/csi-local-hostpath"
controllerBasePath: "/var/lib/csi-local-hostpath"
dirPermissionsMode: "0777"
dirPermissionsUser: 0
dirPermissionsGroup: 0
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# of local-hostpath the node name will be appended
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# of local-hostpath the node name will be appended
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
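# e.g. (illustrative) on a machine already connected to the repository, run
#   kopia repository status -t -s
# and paste the emitted token into config_token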
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -1,7 +1,6 @@
driver: lustre-client
instance_id:
lustre:
# <MGS NID>[:<MGS NID>]
shareHost: server address
shareBasePath: "/some/path"
# shareHost:shareBasePath should be mounted at this location in the controller container
@ -9,50 +8,3 @@ lustre:
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# backup hostname will be set to the csiDriver.name value
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -8,50 +8,3 @@ nfs:
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -27,8 +27,6 @@ node:
customOptions: []
#- -E
#- nodiscard
#- -m
#- 0
# ...
btrfs:
customOptions: []

View File

@ -9,6 +9,21 @@ spec:
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
# can be used to handle CHAP
# in the secret create the following keys:
#
# # any arbitrary iscsiadm entries can be added by creating keys starting with node-db.<entry.name>
# # if doing CHAP
# node-db.node.session.auth.authmethod: CHAP
# node-db.node.session.auth.username: foo
# node-db.node.session.auth.password: bar
#
# # if doing mutual CHAP
# node-db.node.session.auth.username_in: baz
# node-db.node.session.auth.password_in: bar
#nodeStageSecretRef:
# name: some name
# namespace: some namespace
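# a matching secret (purely illustrative) would carry those keys in stringData:
#
#   kind: Secret
#   stringData:
#     node-db.node.session.auth.authmethod: CHAP
#     node-db.node.session.auth.username: foo
#     node-db.node.session.auth.password: bar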
mountOptions: []
csi:
driver: org.democratic-csi.node-manual
@ -16,21 +31,6 @@ spec:
# can be ext4 or xfs
fsType: ext4
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
# can be used to handle CHAP
# in the secret create the following keys:
#
# # any arbitrary iscsiadm entries can be added by creating keys starting with node-db.<entry.name>
# # if doing CHAP
# node-db.node.session.auth.authmethod: CHAP
# node-db.node.session.auth.username: foo
# node-db.node.session.auth.password: bar
#
# # if doing mutual CHAP
# node-db.node.session.auth.username_in: baz
# node-db.node.session.auth.password_in: bar
#nodeStageSecretRef:
# name: some name
# namespace: some namespace
volumeAttributes:
portal: <ip:port>
#portals: <ip:port>,<ip:port>,...

View File

@ -1,26 +0,0 @@
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: nvmeof-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
mountOptions: []
csi:
driver: org.democratic-csi.node-manual
readOnly: false
# can be ext4 or xfs
fsType: ext4
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
volumeAttributes:
# rdma and fc are also available
transport: tcp://<ip:port>
#transports: <transport>,<transport>,...
nqn: <nqn>
nsid: <nsid>
node_attach_driver: "nvmeof"
provisioner_driver: node-manual

View File

@ -1,51 +0,0 @@
---
apiVersion: v1
kind: Secret
metadata:
name: objectivefs-secret
namespace: kube-system
stringData:
# these can be defined here OR in volumeAttributes
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
"env.OBJECTSTORE": ""
"env.ACCESS_KEY": ""
"env.SECRET_KEY": ""
"env.OBJECTIVEFS_PASSPHRASE": ""
# does NOT need admin key appended for node-manual operations
"env.OBJECTIVEFS_LICENSE": ""
"env.ENDPOINT": ""
# ...
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: objectivefs-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
[]
# https://objectivefs.com/userguide#mount
#- nodiratime
#- noatime
#- fsavail=<size>
csi:
driver: org.democratic-csi.node-manual
readOnly: false
fsType: objectivefs
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
nodeStageSecretRef:
name: objectivefs-secret
namespace: kube-system
volumeAttributes:
node_attach_driver: objectivefs
provisioner_driver: node-manual
filesystem: "ofs/test"
# these can be defined here OR in the secret referenced above
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
#"env.OBJECTSTORE": "minio://"
#"env.ACCESS_KEY": ""
# ...

View File

@ -9,6 +9,9 @@ spec:
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
#nodeStageSecretRef:
# name: some name
# namespace: some namespace
mountOptions:
# creds can be entered into the node-stage-secret in the `mount_flags` key
# the value should be: username=foo,password=bar
@ -19,9 +22,6 @@ spec:
readOnly: false
fsType: cifs
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
#nodeStageSecretRef:
# name: some name
# namespace: some namespace
volumeAttributes:
server: host or ip
share: someshare

View File

@ -1,32 +0,0 @@
driver: objectivefs
objectivefs:
# note, ALL provisioned filesystems will be created in this pool / bucket
# with the same passphrase entered below
#
# in general this pool should be considered as fully managed by democratic-csi
# so a dedicated pool per-cluster / deployment would be best practice
#
pool: ofscsi
cli:
sudoEnabled: false
env:
# NOTE: this must be the license key + admin key
# admin key feature must be activated on your account
# https://objectivefs.com/howto/objectivefs-admin-key-setup
OBJECTIVEFS_LICENSE:
OBJECTSTORE:
ENDPOINT:
SECRET_KEY:
ACCESS_KEY:
# do NOT change this once it has been set and deployed
OBJECTIVEFS_PASSPHRASE:
# ...
_private:
csi:
volume:
idHash:
# due to 63 char limit on objectivefs fs name, we should
# hash volume names to prevent fs names which are too long
# can be 1 of md5, crc8, crc16, crc32
strategy: crc32

View File

@ -8,31 +8,10 @@
_private:
csi:
volume:
volumeContext:
derivedContext:
# driver left blank is used to auto select
driver: memory # strictly to facilitate testing
#driver: kubernetes
# THIS IS UNSUPPORTED, BAD THINGS WILL HAPPEN IF NOT CONFIGURED PROPERLY
# https://github.com/democratic-csi/democratic-csi/issues/289
#
# note the resulting volume id must *always* be the same for every call for the same volume by the CO
# the length must NOT exceed 128 characters
# must start with an alphanumeric character
# must only contain alphanumeric characters or `-` or `_`
idTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# THIS IS UNSUPPORTED, BAD THINGS WILL HAPPEN IF NOT CONFIGURED PROPERLY
# https://github.com/democratic-csi/democratic-csi/issues/289
#
# in order for this to behave sanely you *MUST* set consistent templates for
# share names/assets (ie: nfs/iscsi/etc) and the `idTemplate` above
#
# setting to retain results in noop delete operations (both shares where applicable and volumes remain intact)
# delete|retain
deleteStrategy: retain
# if set, this hash is applied *after* the templating above
idHash:
strategy: crc16
#strategy: crc32
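# e.g. (illustrative) with the idTemplate above, pvc "data" in namespace "default"
# renders to "default-data"; with idHash.strategy crc16 also set, the final
# volume id becomes the crc16 hash of that rendered string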

View File

@ -8,50 +8,3 @@ smb:
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -31,7 +31,6 @@ zfs:
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/test-snapshots
# "" (inherit), lz4, gzip-9, etc
@ -71,10 +70,6 @@ iscsi:
# mutual CHAP
#mutual_userid: "baz"
#mutual_password: "bar"
block:
attributes:
# set to 1 to enable Thin Provisioning Unmap
emulate_tpu: 0
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]

View File

@ -31,7 +31,6 @@ zfs:
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/test-snapshots
datasetEnableQuotas: true

View File

@ -1,103 +0,0 @@
driver: zfs-generic-nvmeof
sshConnection:
host: server address
port: 22
username: root
# use either password or key
password: ""
privateKey: |
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
zfs:
# can be used to override defaults if necessary
# the example below is useful for TrueNAS 12
#cli:
# sudoEnabled: true
# paths:
# zfs: /usr/local/sbin/zfs
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
detachedSnapshotsDatasetParentName: tank/k8s/test-snapshots
# "" (inherit), lz4, gzip-9, etc
zvolCompression:
# "" (inherit), on, off, verify
zvolDedup:
zvolEnableReservation: false
# 512, 1K, 2K, 4K, 8K, 16K, 64K, 128K default is 16K
zvolBlocksize:
nvmeof:
# these are for the node/client aspect
transports:
- tcp://server:port
#- "tcp://127.0.0.1:4420?host-iface=eth0"
#- "tcp://[2001:123:456::1]:4420"
#- "rdma://127.0.0.1:4420"
#- "fc://[nn-0x203b00a098cbcac6:pn-0x203d00a098cbcac6]"
# MUST ensure uniqueness
# full nqn limit is 223 bytes, plan accordingly
# default is "{{ name }}"
#nameTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
namePrefix:
nameSuffix:
shareStrategy: "nvmetCli"
#shareStrategy: "spdkCli"
# https://documentation.suse.com/es-es/sles/15-SP1/html/SLES-all/cha-nvmeof.html
# https://www.linuxjournal.com/content/data-flash-part-iii-nvme-over-fabrics-using-tcp
# http://git.infradead.org/users/hch/nvmetcli.git
shareStrategyNvmetCli:
#sudoEnabled: true
# /root/.local/bin/nvmetcli
#nvmetcliPath: nvmetcli
# prevent startup race conditions by ensuring the config on disk has been imported
# before we start messing with things
#configIsImportedFilePath: /var/run/nvmet-config-loaded
#configPath: /etc/nvmet/config.json
basename: "nqn.2003-01.org.linux-nvme"
# add more ports here as appropriate if you have multipath
ports:
- "1"
subsystem:
attributes:
allow_any_host: 1
# not supported yet in nvmetcli
#namespace:
# attributes:
# buffered_io: 1
shareStrategySpdkCli:
# spdkcli.py
#spdkcliPath: spdkcli
configPath: /etc/spdk/spdk.json
basename: "nqn.2003-01.org.linux-nvmeof"
bdev:
type: uring
#type: aio
attributes:
block_size: 512
subsystem:
attributes:
allow_any_host: "true"
listeners:
- trtype: tcp
traddr: server
trsvcid: port
adrfam: ipv4

View File

@ -32,7 +32,6 @@ zfs:
datasetParentName: tank/k8s/test
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tank/k8s/test-snapshots
datasetEnableQuotas: true

package-lock.json generated

File diff suppressed because it is too large

View File

@ -1,6 +1,6 @@
{
"name": "democratic-csi",
"version": "1.9.0",
"version": "1.7.3",
"description": "kubernetes csi driver framework",
"main": "bin/democratic-csi",
"scripts": {
@ -18,14 +18,13 @@
"url": "https://github.com/democratic-csi/democratic-csi.git"
},
"dependencies": {
"@grpc/grpc-js": "^1.8.4",
"@grpc/grpc-js": "^1.5.7",
"@grpc/proto-loader": "^0.7.0",
"@kubernetes/client-node": "^0.18.0",
"async-mutex": "^0.4.0",
"axios": "^1.1.3",
"@kubernetes/client-node": "^0.17.0",
"async-mutex": "^0.3.1",
"axios": "^0.27.2",
"bunyan": "^1.8.15",
"crc": "^4.3.2",
"fs-extra": "^11.1.0",
"fs-extra": "^10.1.0",
"handlebars": "^4.7.7",
"js-yaml": "^4.0.0",
"lodash": "^4.17.21",
@ -34,7 +33,7 @@
"semver": "^7.3.4",
"ssh2": "^1.1.0",
"uri-js": "^4.4.1",
"uuid": "^9.0.0",
"uuid": "^8.3.2",
"winston": "^3.6.0",
"yargs": "^17.0.1"
},

View File

@ -4,19 +4,9 @@ const { GrpcError, grpc } = require("../../utils/grpc");
const cp = require("child_process");
const fs = require("fs");
const fse = require("fs-extra");
const Kopia = require("../../utils/kopia").Kopia;
const os = require("os");
const path = require("path");
const Restic = require("../../utils/restic").Restic;
const semver = require("semver");
const __REGISTRY_NS__ = "ControllerClientCommonDriver";
// https://forum.restic.net/t/how-to-prevent-two-restic-tasks-concurrently/6859/5
const SNAPSHOTS_CUT_IN_FLIGHT = new Set();
const SNAPSHOTS_RESTORE_IN_FLIGHT = new Set();
const DEFAULT_SNAPSHOT_DRIVER = "filecopy";
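// note: in the code shown here these sets feed the periodic in-flight logging
// below; they are not themselves used to serialize concurrent cut/restore work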
/**
* Crude nfs-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
@ -112,48 +102,6 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
if (this.ctx.args.csiMode.includes("controller")) {
setInterval(() => {
this.ctx.logger.info("snapshots cut in flight", {
names: [...SNAPSHOTS_CUT_IN_FLIGHT],
count: SNAPSHOTS_CUT_IN_FLIGHT.size,
});
}, 30 * 1000);
setInterval(() => {
this.ctx.logger.info("snapshots restore in flight", {
names: [...SNAPSHOTS_RESTORE_IN_FLIGHT],
count: SNAPSHOTS_RESTORE_IN_FLIGHT.size,
});
}, 30 * 1000);
}
}
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
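// e.g. (illustrative) a driver config could pin the advertised modes via:
//
// csi:
//   access_modes:
//     - MULTI_NODE_MULTI_WRITER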
assertCapabilities(capabilities) {
@ -178,7 +126,16 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
}
if (
!this.getAccessModes(capability).includes(capability.access_mode.mode)
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
@ -454,90 +411,6 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
return p.replaceAll(path.posix.sep, path.win32.sep);
}
async getResticClient() {
const driver = this;
return this.ctx.registry.get(`${__REGISTRY_NS__}:restic`, () => {
const config_key = driver.getConfigKey();
const restic_env = _.get(
driver.options[config_key],
"snapshots.restic.env",
{}
);
const restic_global_flags = _.get(
driver.options[config_key],
"snapshots.restic.global_flags",
[]
);
const client = new Restic({
env: restic_env,
logger: driver.ctx.logger,
global_flags: restic_global_flags,
});
let hostname = driver.ctx.args.csiName;
if (driver.options.driver == "local-hostpath") {
let nodename = process.env.CSI_NODE_ID || os.hostname();
hostname = `${hostname}-${nodename}`;
}
return client;
});
}
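// note: the registry lookup above suggests the Restic client is constructed
// once per process and reused on later calls (the factory only runs on the
// first lookup for the key); the kopia client below follows the same pattern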
async getKopiaClient() {
const driver = this;
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:kopia`, async () => {
const config_key = driver.getConfigKey();
const kopia_env = _.get(
driver.options[config_key],
"snapshots.kopia.env",
{}
);
const kopia_global_flags = _.get(
driver.options[config_key],
"snapshots.kopia.global_flags",
[]
);
const client = new Kopia({
env: kopia_env,
logger: driver.ctx.logger,
global_flags: kopia_global_flags,
});
let hostname = driver.ctx.args.csiName;
if (driver.options.driver == "local-hostpath") {
let nodename = process.env.CSI_NODE_ID || os.hostname();
hostname = `${hostname}-${nodename}`;
}
let username = "democratic-csi";
await client.repositoryConnect([
"--override-hostname",
hostname,
"--override-username",
username,
"from-config",
"--token",
_.get(driver.options[config_key], "snapshots.kopia.config_token", ""),
]);
//let repositoryStatus = await client.repositoryStatus();
//console.log(repositoryStatus);
client.hostname = hostname;
client.username = username;
return client;
});
}
/**
* Create a volume doing in essence the following:
* 1. create directory
@ -551,10 +424,16 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
async CreateVolume(call) {
const driver = this;
const config_key = driver.getConfigKey();
const volume_id = await driver.getVolumeIdFromCall(call);
const volume_content_source = call.request.volume_content_source;
const instance_id = driver.options.instance_id;
let config_key = this.getConfigKey();
let name = call.request.name;
let volume_content_source = call.request.volume_content_source;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (
call.request.volume_capabilities &&
@ -616,7 +495,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
);
}
const volume_path = driver.getControllerVolumePath(volume_id);
const volume_path = driver.getControllerVolumePath(name);
let response;
let source_path;
@ -628,117 +507,13 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
// create dataset
if (volume_content_source) {
let snapshot_driver;
let snapshot_id;
if (volume_content_source.type == "snapshot") {
snapshot_id = volume_content_source.snapshot.snapshot_id;
// get parsed variant of driver to allow snapshotter to work with all
// drivers simultaneously
const parsed_snapshot_id = new URLSearchParams(snapshot_id);
if (parsed_snapshot_id.get("snapshot_driver")) {
snapshot_id = parsed_snapshot_id.get("snapshot_id");
snapshot_driver = parsed_snapshot_id.get("snapshot_driver");
} else {
snapshot_driver = "filecopy";
}
}
switch (volume_content_source.type) {
// must be available when advertising CREATE_DELETE_SNAPSHOT
// simply clone
case "snapshot":
switch (snapshot_driver) {
case "filecopy":
{
source_path = driver.getControllerSnapshotPath(snapshot_id);
if (!(await driver.directoryExists(source_path))) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_content_source path: ${source_path}`
);
}
driver.ctx.logger.debug(
"controller volume source path: %s",
source_path
);
await driver.cloneDir(source_path, volume_path);
}
break;
case "restic":
{
const restic = await driver.getResticClient();
let options = [];
await restic.init();
// find snapshot
options = [snapshot_id];
const snapshots = await restic.snapshots(options);
if (snapshots.length === 0) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid restic snapshot volume_content_source: ${snapshot_id}`
);
}
const snapshot = snapshots[snapshots.length - 1];
// restore snapshot
// --verify?
options = [
`${snapshot.id}:${snapshot.paths[0]}`,
"--target",
volume_path,
"--sparse",
"--host",
restic.hostname,
];
// technically same snapshot could be getting restored to multiple volumes simultaneously
// ensure we add target path as part of the key
SNAPSHOTS_RESTORE_IN_FLIGHT.add(
`${snapshot_id}:${volume_path}`
);
await restic.restore(options).finally(() => {
SNAPSHOTS_RESTORE_IN_FLIGHT.delete(
`${snapshot_id}:${volume_path}`
);
});
}
break;
case "kopia":
{
const kopia = await driver.getKopiaClient();
const snapshot = await kopia.snapshotGet(snapshot_id);
if (!snapshot) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid kopia snapshot volume_content_source: ${snapshot_id}`
);
}
/**
* --[no-]write-files-atomically
* --[no-]write-sparse-files
*/
let options = [
"--write-sparse-files",
snapshot_id,
volume_path,
];
await kopia.snapshotRestore(options);
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown snapshot driver: ${snapshot_driver}`
);
}
source_path = driver.getControllerSnapshotPath(
volume_content_source.snapshot.snapshot_id
);
break;
// must be available when advertising CLONE_VOLUME
// create snapshot first, then clone
@ -746,26 +521,24 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
source_path = driver.getControllerVolumePath(
volume_content_source.volume.volume_id
);
if (!(await driver.directoryExists(source_path))) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_content_source path: ${source_path}`
);
}
driver.ctx.logger.debug(
"controller volume source path: %s",
source_path
);
await driver.cloneDir(source_path, volume_path);
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`invalid volume_content_source type: ${volume_content_source.type}`
);
break;
}
if (!(await driver.directoryExists(source_path))) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_content_source path: ${source_path}`
);
}
driver.ctx.logger.debug("controller source path: %s", source_path);
await driver.cloneDir(source_path, volume_path);
}
// set mode
@ -805,7 +578,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
}
}
let volume_context = driver.getVolumeContext(volume_id);
let volume_context = driver.getVolumeContext(name);
volume_context["provisioner_driver"] = driver.options.driver;
if (driver.options.instance_id) {
@ -820,7 +593,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
const res = {
volume: {
volume_id,
volume_id: name,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
@ -843,27 +616,16 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
async DeleteVolume(call) {
const driver = this;
const volume_id = call.request.volume_id;
let name = call.request.volume_id;
if (!volume_id) {
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
const volume_path = driver.getControllerVolumePath(volume_id);
const volume_path = driver.getControllerVolumePath(name);
await driver.deleteDir(volume_path);
return {};
@ -944,49 +706,14 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
}
/**
* Create snapshot is meant to be a synchronous call to 'cut' the snapshot
* in the case of rsync/restic/kopia/etc tooling a 'cut' can take a very
* long time. It was deemed appropriate to continue to wait vs making the
* call async with `ready_to_use` false.
*
* Restic:
* With restic the idea is to keep the tree scoped to each volume. Each
* new snapshot for the same volume should have a parent of the most recently
* cut snapshot for the same volume. Behind the scenes restic applies
* dedup logic globally across the repo, so storage use should still be
* extremely efficient.
*
* Kopia:
*
*
* https://github.com/container-storage-interface/spec/blob/master/spec.md#createsnapshot
*
* @param {*} call
*/
async CreateSnapshot(call) {
const driver = this;
const config_key = driver.getConfigKey();
let snapshot_driver = _.get(
driver.options[config_key],
"snapshots.default_driver",
DEFAULT_SNAPSHOT_DRIVER
);
// randomize driver for testing
//if (process.env.CSI_SANITY == "1") {
// call.request.parameters.driver = ["filecopy", "restic", "kopia"].random();
//}
if (call.request.parameters.driver) {
snapshot_driver = call.request.parameters.driver;
}
const instance_id = driver.options.instance_id;
let response;
// both these are required
const source_volume_id = call.request.source_volume_id;
let source_volume_id = call.request.source_volume_id;
let name = call.request.name;
if (!source_volume_id) {
@ -1021,262 +748,17 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
name = name.replace(/[^a-z0-9_\-:.+]+/gi, "");
driver.ctx.logger.verbose("cleansed snapshot name: %s", name);
const snapshot_id = `${source_volume_id}-${name}`;
const volume_path = driver.getControllerVolumePath(source_volume_id);
//const volume_path = "/home/thansen/beets/";
//const volume_path = "/var/lib/docker/";
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
let snapshot_id;
let size_bytes = 0;
let ready_to_use = true;
let snapshot_date = new Date();
switch (snapshot_driver) {
case "filecopy":
{
snapshot_id = `${source_volume_id}-${name}`;
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
const snapshot_dir_exists = await driver.directoryExists(
snapshot_path
);
// do NOT overwrite existing snapshot
if (!snapshot_dir_exists) {
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
await driver.cloneDir(volume_path, snapshot_path).finally(() => {
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
});
driver.ctx.logger.info(
`filecopy backup finished: snapshot_id=${snapshot_id}, path=${volume_path}`
);
} else {
driver.ctx.logger.debug(
`filecopy backup already cut: ${snapshot_id}`
);
}
size_bytes = await driver.getDirectoryUsage(snapshot_path);
}
break;
case "restic":
{
const restic = await driver.getResticClient();
const group_by_options = ["--group-by", "host,paths,tags"];
let snapshot_exists = false;
// --tag specified multiple times is OR logic, comma-separated is AND logic
let base_tag_option = `source=democratic-csi`;
base_tag_option += `,csi_volume_id=${source_volume_id}`;
if (instance_id) {
base_tag_option += `,csi_instance_id=${instance_id}`;
}
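// e.g. (values illustrative) the resulting AND-matched tag filter resembles:
//   source=democratic-csi,csi_volume_id=pvc-123,csi_instance_id=cluster-a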
let options = [];
/**
* ensure repo has been initted
*
* it is expected that at a minimum the following env vars are set
* RESTIC_PASSWORD
* RESTIC_REPOSITORY
*/
options = [];
await restic.init();
// see if snapshot already exist with matching tags, etc
options = [
"--path",
volume_path.replace(/\/$/, ""),
"--host",
restic.hostname,
];
// when searching for existing snapshot include name
response = await restic.snapshots(
options
.concat(group_by_options)
.concat(["--tag", base_tag_option + `,csi_snapshot_name=${name}`])
);
if (response.length > 0) {
snapshot_exists = true;
const snapshot = response[response.length - 1];
driver.ctx.logger.debug(
`restic backup already cut: ${snapshot.id}`
);
const stats = await restic.stats([snapshot.id]);
snapshot_id = snapshot.id;
snapshot_date = new Date(snapshot.time);
size_bytes = stats.total_size;
}
if (!snapshot_exists) {
// --no-scan do not run scanner to estimate size of backup
// -x, --one-file-system exclude other file systems, don't cross filesystem boundaries and subvolumes
options = [
"--host",
restic.hostname,
"--one-file-system",
//"--no-scan",
];
// backup with minimal tags to ensure a sane parent for the volume (since tags are included in group_by)
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
response = await restic
.backup(
volume_path,
options
.concat(group_by_options)
.concat(["--tag", base_tag_option])
)
.finally(() => {
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
});
response.parsed.reverse();
let summary = response.parsed.find((message) => {
return message.message_type == "summary";
});
snapshot_id = summary.snapshot_id;
driver.ctx.logger.info(
`restic backup finished: snapshot_id=${snapshot_id}, path=${volume_path}, total_duration=${
summary.total_duration | 0
}s`
);
const stats = await restic.stats([snapshot_id]);
size_bytes = stats.total_size;
// only apply these tags at creation, do NOT use for search above etc
let add_tags = `csi_snapshot_name=${name}`;
let config_tags = _.get(
driver.options[config_key],
"snapshots.restic.tags",
[]
);
if (config_tags.length > 0) {
add_tags += `,${config_tags.join(",")}`;
}
await restic.tag([
"--path",
volume_path.replace(/\/$/, ""),
"--host",
restic.hostname,
"--add",
add_tags,
snapshot_id,
]);
// this is ugly, the tag operation should output the new id, so we
// must resort to full query of all snapshots for the volume
// find snapshot using `original` id as adding tags creates a new id
options = [
"--path",
volume_path.replace(/\/$/, ""),
"--host",
restic.hostname,
];
response = await restic.snapshots(
options
.concat(group_by_options)
.concat([
"--tag",
`${base_tag_option},csi_snapshot_name=${name}`,
])
);
let original_snapshot_id = snapshot_id;
let snapshot = response.find((snapshot) => {
return snapshot.original == original_snapshot_id;
});
if (!snapshot) {
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to find snapshot post-tag operation: snapshot_id=${original_snapshot_id}`
);
}
snapshot_id = snapshot.id;
driver.ctx.logger.info(
`restic backup successfully applied additional tags: new_snapshot_id=${snapshot_id}, original_snapshot_id=${original_snapshot_id} path=${volume_path}`
);
}
}
break;
case "kopia":
{
const kopia = await driver.getKopiaClient();
let options = [];
let snapshot_exists = false;
// --tags specified multiple times means snapshot must contain ALL supplied tags
let tags = [];
tags.push(`source:democratic-csi`);
tags.push(`csi_volume_id:${source_volume_id}`);
if (instance_id) {
tags.push(`csi_instance_id:${instance_id}`);
}
tags.push(`csi_snapshot_name:${name}`);
options = ["--no-storage-stats", "--no-delta"];
tags.forEach((item) => {
options.push("--tags", item);
});
options.push(
`${kopia.username}@${kopia.hostname}:${volume_path.replace(
/\/$/,
""
)}`
);
response = await kopia.snapshotList(options);
if (response.length > 0) {
snapshot_exists = true;
const snapshot = response[response.length - 1];
driver.ctx.logger.debug(
`kopia snapshot already cut: ${snapshot.id}`
);
snapshot_id = snapshot.id;
snapshot_date = new Date(snapshot.startTime); // maybe use endTime?
size_bytes = snapshot.stats.totalSize;
}
if (!snapshot_exists) {
// create snapshot
options = [];
tags.forEach((item) => {
options.push("--tags", item);
});
options.push(volume_path);
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
response = await kopia.snapshotCreate(options).finally(() => {
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
});
snapshot_id = response.id;
snapshot_date = new Date(response.startTime); // maybe use endTime?
let snapshot_end_date = new Date(response.endTime);
let total_duration =
Math.abs(snapshot_end_date.getTime() - snapshot_date.getTime()) /
1000;
size_bytes = response.rootEntry.summ.size;
driver.ctx.logger.info(
`kopia backup finished: snapshot_id=${snapshot_id}, path=${volume_path}, total_duration=${
total_duration | 0
}s`
);
}
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown snapshot driver: ${snapshot_driver}`
);
// do NOT overwrite existing snapshot
if (!(await driver.directoryExists(snapshot_path))) {
await driver.cloneDir(volume_path, snapshot_path);
}
let size_bytes = await driver.getDirectoryUsage(snapshot_path);
return {
snapshot: {
/**
@ -1284,17 +766,14 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
* is needed to create a volume from this snapshot.
*/
size_bytes,
snapshot_id: new URLSearchParams({
snapshot_driver,
snapshot_id,
}).toString(),
snapshot_id,
source_volume_id: source_volume_id,
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: Math.round(snapshot_date.getTime() / 1000),
seconds: Math.round(new Date().getTime() / 1000),
nanos: 0,
},
ready_to_use,
ready_to_use: true,
},
};
}
@ -1308,11 +787,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
async DeleteSnapshot(call) {
const driver = this;
let snapshot_id = call.request.snapshot_id;
let snapshot_driver;
const config_key = driver.getConfigKey();
const instance_id = driver.options.instance_id;
let response;
const snapshot_id = call.request.snapshot_id;
if (!snapshot_id) {
throw new GrpcError(
@ -1321,70 +796,8 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
);
}
// get parsed variant of driver to allow snapshotter to work with all
// drivers simultaneously
const parsed_snapshot_id = new URLSearchParams(snapshot_id);
if (parsed_snapshot_id.get("snapshot_driver")) {
snapshot_id = parsed_snapshot_id.get("snapshot_id");
snapshot_driver = parsed_snapshot_id.get("snapshot_driver");
} else {
snapshot_driver = "filecopy";
}
switch (snapshot_driver) {
case "filecopy":
{
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.deleteDir(snapshot_path);
}
break;
case "restic":
{
let prune = _.get(
driver.options[config_key],
"snapshots.restic.prune",
false
);
if (typeof prune != "boolean") {
prune = String(prune);
if (["true", "yes", "1"].includes(prune.toLowerCase())) {
prune = true;
} else {
prune = false;
}
}
const restic = await driver.getResticClient();
let options = [];
await restic.init();
// we preempt with this check to prevent locking the repo when snapshot does not exist
const snapshot_exists = await restic.snapshot_exists(snapshot_id);
if (snapshot_exists) {
options = [];
if (prune) {
options.push("--prune");
}
options.push(snapshot_id);
await restic.forget(options);
}
}
break;
case "kopia":
{
const kopia = await driver.getKopiaClient();
let options = [snapshot_id];
await kopia.snapshotDelete(options);
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown snapshot driver: ${snapshot_driver}`
);
}
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.deleteDir(snapshot_path);
return {};
}
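// a minimal sketch (ids illustrative) of how the composite snapshot_id
// round-trips through URLSearchParams in CreateSnapshot/DeleteSnapshot:
//   new URLSearchParams({ snapshot_driver: "restic", snapshot_id: "abc123" }).toString()
//     -> "snapshot_driver=restic&snapshot_id=abc123"
//   new URLSearchParams("snapshot_driver=restic&snapshot_id=abc123").get("snapshot_id")
//     -> "abc123"
// ids created before this scheme carry no snapshot_driver key and fall back
// to the "filecopy" default above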

View File

@ -44,11 +44,12 @@ class ControllerLocalHostpathDriver extends ControllerClientCommonDriver {
return "local-hostpath";
}
getVolumeContext(volume_id) {
getVolumeContext(name) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "hostpath",
path: driver.getShareVolumePath(volume_id),
path: driver.getShareVolumePath(name),
};
}

View File

@ -13,13 +13,13 @@ class ControllerLustreClientDriver extends ControllerClientCommonDriver {
return "lustre";
}
getVolumeContext(volume_id) {
getVolumeContext(name) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "lustre",
server: this.options[config_key].shareHost,
share: driver.getShareVolumePath(volume_id),
share: driver.getShareVolumePath(name),
};
}

View File

@ -13,13 +13,13 @@ class ControllerNfsClientDriver extends ControllerClientCommonDriver {
return "nfs";
}
getVolumeContext(volume_id) {
getVolumeContext(name) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "nfs",
server: this.options[config_key].shareHost,
share: driver.getShareVolumePath(volume_id),
share: driver.getShareVolumePath(name),
};
}

View File

@ -1,670 +0,0 @@
const _ = require("lodash");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const { ObjectiveFS } = require("../../utils/objectivefs");
const semver = require("semver");
const uuidv4 = require("uuid").v4;
const __REGISTRY_NS__ = "ControllerZfsLocalDriver";
const MAX_VOLUME_NAME_LENGTH = 63;
class ControllerObjectiveFSDriver extends CsiBaseDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
//"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
"LIST_VOLUMES",
//"GET_CAPACITY",
//"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
//"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc.push(
//"VOLUME_CONDITION",
//"GET_VOLUME"
);
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc.push(
"SINGLE_NODE_MULTI_WRITER"
);
}
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
async getObjectiveFSClient() {
const driver = this;
return this.ctx.registry.getAsync(
`${__REGISTRY_NS__}:objectivefsclient`,
async () => {
const options = {};
options.sudo = _.get(
driver.options,
"objectivefs.cli.sudoEnabled",
false
);
options.pool = _.get(driver.options, "objectivefs.pool");
return new ObjectiveFS({
...options,
env: _.get(driver.options, "objectivefs.env", {}),
});
}
);
}
/**
*
* @returns Array
*/
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
getFsTypes() {
return ["fuse.objectivefs", "objectivefs"];
}
assertCapabilities(capabilities) {
const driver = this;
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
let fs_types = driver.getFsTypes();
const valid = capabilities.every((capability) => {
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!fs_types.includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
!this.getAccessModes(capability).includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
});
return { valid, message };
}
async getVolumeStatus(entry) {
const driver = this;
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
const volume_id = entry.NAME.replace(object_store, "").split("/")[1];
if (!semver.satisfies(driver.ctx.csiVersion, ">=1.2.0")) {
return;
}
let abnormal = false;
let message = "OK";
let volume_status = {};
//LIST_VOLUMES_PUBLISHED_NODES
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.2.0") &&
driver.options.service.controller.capabilities.rpc.includes(
"LIST_VOLUMES_PUBLISHED_NODES"
)
) {
// TODO: let drivers fill this in
volume_status.published_node_ids = [];
}
//VOLUME_CONDITION
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") &&
driver.options.service.controller.capabilities.rpc.includes(
"VOLUME_CONDITION"
)
) {
// TODO: let drivers fill this in
const volume_condition = { abnormal, message };
volume_status.volume_condition = volume_condition;
}
return volume_status;
}
async populateCsiVolumeFromData(entry) {
const driver = this;
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
let filesystem = entry.NAME.replace(object_store, "");
let volume_content_source;
let volume_context = {
provisioner_driver: driver.options.driver,
node_attach_driver: "objectivefs",
filesystem,
object_store,
"env.OBJECTSTORE": object_store,
};
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
let accessible_topology;
let volume = {
volume_id: filesystem.split("/")[1],
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
accessible_topology,
};
return volume;
}
/**
* Ensure sane options are used etc
* true = ready
* false = not ready, but progressing towards ready
* throw error = faulty setup
*
* @param {*} call
*/
async Probe(call) {
const driver = this;
const pool = _.get(driver.options, "objectivefs.pool");
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
if (driver.ctx.args.csiMode.includes("controller")) {
if (!pool) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`objectivefs.pool not configured`
);
}
if (!object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`env.OBJECTSTORE not configured`
);
}
return { ready: { value: true } };
} else {
return { ready: { value: true } };
}
}
/**
* Create an objectivefs filesystem as a new volume
*
* @param {*} call
*/
async CreateVolume(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
const parameters = call.request.parameters;
if (!pool) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`objectivefs.pool not configured`
);
}
if (!object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`env.OBJECTSTORE not configured`
);
}
const context_env = {};
for (const key in parameters) {
if (key.startsWith("env.")) {
context_env[key] = parameters[key];
}
}
context_env["env.OBJECTSTORE"] = object_store;
// filesystem names are always lower-cased by ofs
let volume_id = await driver.getVolumeIdFromCall(call);
let volume_content_source = call.request.volume_content_source;
volume_id = volume_id.toLowerCase();
const filesystem = `${pool}/${volume_id}`;
if (volume_id.length >= MAX_VOLUME_NAME_LENGTH) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`derived volume_id ${volume_id} is too long for objectivefs`
);
}
if (
call.request.volume_capabilities &&
call.request.volume_capabilities.length > 0
) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
} else {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
"missing volume_capabilities"
);
}
if (
!call.request.capacity_range ||
Object.keys(call.request.capacity_range).length === 0
) {
call.request.capacity_range = {
required_bytes: 1073741824, // meaningless
};
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greater than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
if (volume_content_source) {
//should never happen, cannot clone with this driver
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`cloning is not enabled`
);
}
await ofsClient.create({}, filesystem, ["-f"]);
let volume_context = {
provisioner_driver: driver.options.driver,
node_attach_driver: "objectivefs",
filesystem,
...context_env,
};
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
* Delete a volume
*
* Deleting a volume consists of the following steps:
* 1. delete directory
*
* @param {*} call
*/
async DeleteVolume(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
let volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
volume_id = volume_id.toLowerCase();
const filesystem = `${pool}/${volume_id}`;
await ofsClient.destroy({}, filesystem, []);
return {};
}
/**
*
* @param {*} call
*/
async ControllerExpandVolume(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
* TODO: consider volume_capabilities?
*
* @param {*} call
*/
async GetCapacity(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
let entries = [];
let entries_length = 0;
let next_token;
let uuid;
let response;
const max_entries = call.request.max_entries;
const starting_token = call.request.starting_token;
// get data from cache and return immediately
if (starting_token) {
let parts = starting_token.split(":");
uuid = parts[0];
let start_position = parseInt(parts[1]);
let end_position;
if (max_entries > 0) {
end_position = start_position + max_entries;
}
entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`);
if (entries) {
entries_length = entries.length;
entries = entries.slice(start_position, end_position);
if (max_entries > 0 && end_position < entries_length) {
next_token = `${uuid}:${end_position}`;
} else {
next_token = null;
}
const data = {
entries: entries,
next_token: next_token,
};
return data;
} else {
throw new GrpcError(
grpc.status.ABORTED,
`invalid starting_token: ${starting_token}`
);
}
}
entries = [];
const list_entries = await ofsClient.list({});
for (const entry of list_entries) {
if (entry.KIND != "ofs") {
continue;
}
let volume = await driver.populateCsiVolumeFromData(entry);
if (volume) {
let status = await driver.getVolumeStatus(entry);
entries.push({
volume,
status,
});
}
}
if (max_entries && entries.length > max_entries) {
uuid = uuidv4();
this.ctx.cache.set(`ListVolumes:result:${uuid}`, entries);
next_token = `${uuid}:${max_entries}`;
entries = entries.slice(0, max_entries);
}
const data = {
entries: entries,
next_token: next_token,
};
return data;
}
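The pagination above caches the full listing under a UUID and hands out `uuid:position` tokens; a hedged sketch of how a caller might drain all pages (helper name illustrative):
// hedged sketch: paging through ListVolumes with the cache-backed tokens
async function listAllVolumes(driver) {
  const all = [];
  let starting_token; // undefined on the first call
  do {
    const res = await driver.ListVolumes({
      request: { max_entries: 100, starting_token },
    });
    all.push(...res.entries);
    starting_token = res.next_token; // `${uuid}:${end_position}` or null
  } while (starting_token);
  return all;
}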
/**
*
* @param {*} call
*/
async ListSnapshots(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async CreateSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
* In addition, if clones have been created from a snapshot, then they must
* be destroyed before the snapshot can be destroyed.
*
* @param {*} call
*/
async DeleteSnapshot(call) {
throw new GrpcError(
grpc.status.UNIMPLEMENTED,
`operation not supported by driver`
);
}
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
const volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing volume_id`);
}
const filesystem = `${pool}/${volume_id}`;
const entries = await ofsClient.list({}, filesystem);
const exists = entries.some((entry) => {
return entry.NAME.endsWith(filesystem) && entry.KIND == "ofs";
});
if (!exists) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_id: ${volume_id}`
);
}
const capabilities = call.request.volume_capabilities;
if (!capabilities || capabilities.length === 0) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing capabilities`);
}
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ControllerObjectiveFSDriver = ControllerObjectiveFSDriver;
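A hedged example of the request shape ValidateVolumeCapabilities handles above (field layout per the CSI spec, values illustrative):
// hedged example payload; the handler checks `${pool}/${volume_id}` exists
// and then runs assertCapabilities over volume_capabilities
const call = {
  request: {
    volume_id: "pvc-0123",
    volume_capabilities: [
      {
        access_type: "mount",
        mount: { fs_type: "objectivefs", mount_flags: [] }, // fs_type illustrative
        access_mode: { mode: "MULTI_NODE_MULTI_WRITER" },
      },
    ],
    volume_context: {},
    parameters: {},
  },
};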

View File

@ -13,13 +13,13 @@ class ControllerSmbClientDriver extends ControllerClientCommonDriver {
return "smb";
}
getVolumeContext(volume_id) {
getVolumeContext(name) {
const driver = this;
const config_key = driver.getConfigKey();
return {
node_attach_driver: "smb",
server: this.options[config_key].shareHost,
share: driver.stripLeadingSlash(driver.getShareVolumePath(volume_id)),
share: driver.stripLeadingSlash(driver.getShareVolumePath(name)),
};
}

View File

@ -3,6 +3,7 @@ const http = require("http");
const https = require("https");
const { axios_request, stringify } = require("../../../utils/general");
const Mutex = require("async-mutex").Mutex;
const registry = require("../../../utils/registry");
const { GrpcError, grpc } = require("../../../utils/grpc");
const USER_AGENT = "democratic-csi";
@ -94,7 +95,7 @@ class SynologyHttpClient {
}
getHttpAgent() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_agent`, () => {
return registry.get(`${__REGISTRY_NS__}:http_agent`, () => {
return new http.Agent({
keepAlive: true,
maxSockets: Infinity,
@ -104,7 +105,7 @@ class SynologyHttpClient {
}
getHttpsAgent() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:https_agent`, () => {
return registry.get(`${__REGISTRY_NS__}:https_agent`, () => {
return new https.Agent({
keepAlive: true,
maxSockets: Infinity,

View File

@ -3,6 +3,7 @@ const { CsiBaseDriver } = require("../index");
const GeneralUtils = require("../../utils/general");
const { GrpcError, grpc } = require("../../utils/grpc");
const Handlebars = require("handlebars");
const registry = require("../../utils/registry");
const SynologyHttpClient = require("./http").SynologyHttpClient;
const semver = require("semver");
const yaml = require("js-yaml");
@ -114,7 +115,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
}
async getHttpClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_client`, () => {
return registry.get(`${__REGISTRY_NS__}:http_client`, () => {
return new SynologyHttpClient(this.options.httpConnection);
});
}
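These call sites assume a small keyed memoizer; a self-contained sketch of the presumed registry contract (illustrative, not the actual utils/registry implementation):
// hedged sketch of the get/getAsync contract assumed by these call sites:
// build the value once per key, then return the cached instance
class Registry {
  constructor() {
    this.data = {};
  }
  get(key, factory) {
    if (!(key in this.data)) this.data[key] = factory();
    return this.data[key];
  }
  async getAsync(key, factory) {
    // simplified: concurrent first calls could race and double-create
    if (!(key in this.data)) this.data[key] = await factory();
    return this.data[key];
  }
}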
@ -175,8 +176,8 @@ class ControllerSynologyDriver extends CsiBaseDriver {
}
}
buildIscsiName(volume_id) {
let iscsiName = volume_id;
buildIscsiName(name) {
let iscsiName = name;
if (this.options.iscsi.namePrefix) {
iscsiName = this.options.iscsi.namePrefix + iscsiName;
}
@ -207,49 +208,6 @@ class ControllerSynologyDriver extends CsiBaseDriver {
return location;
}
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
const driverResourceType = this.getDriverResourceType();
switch (driverResourceType) {
case "filesystem":
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
case "volume":
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
];
break;
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
assertCapabilities(capabilities) {
const driverResourceType = this.getDriverResourceType();
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
@ -275,9 +233,16 @@ class ControllerSynologyDriver extends CsiBaseDriver {
}
if (
!this.getAccessModes(capability).includes(
capability.access_mode.mode
)
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
@ -298,9 +263,15 @@ class ControllerSynologyDriver extends CsiBaseDriver {
}
if (
!this.getAccessModes(capability).includes(
capability.access_mode.mode
)
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
@ -323,9 +294,16 @@ class ControllerSynologyDriver extends CsiBaseDriver {
const driver = this;
const httpClient = await driver.getHttpClient();
let volume_id = await driver.getVolumeIdFromCall(call);
let name = call.request.name;
let volume_content_source = call.request.volume_content_source;
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (
call.request.volume_capabilities &&
call.request.volume_capabilities.length > 0
@ -406,7 +384,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
);
break;
case "iscsi":
let iscsiName = driver.buildIscsiName(volume_id);
let iscsiName = driver.buildIscsiName(name);
let lunTemplate;
let targetTemplate;
let data;
@ -662,7 +640,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
const res = {
volume: {
volume_id,
volume_id: name,
capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
content_source: volume_content_source,
volume_context,
@ -681,26 +659,15 @@ class ControllerSynologyDriver extends CsiBaseDriver {
const driver = this;
const httpClient = await driver.getHttpClient();
let volume_id = call.request.volume_id;
let name = call.request.volume_id;
if (!volume_id) {
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
let response;
switch (driver.getDriverShareType()) {
@ -721,7 +688,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
case "iscsi":
//await httpClient.DeleteAllLuns();
let iscsiName = driver.buildIscsiName(volume_id);
let iscsiName = driver.buildIscsiName(name);
let iqn = driver.options.iscsi.baseiqn + iscsiName;
let target = await httpClient.GetTargetByIQN(iqn);
@ -789,9 +756,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
const driver = this;
const httpClient = await driver.getHttpClient();
let volume_id = call.request.volume_id;
let name = call.request.volume_id;
if (!volume_id) {
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
@ -853,7 +820,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
break;
case "iscsi":
node_expansion_required = true;
let iscsiName = driver.buildIscsiName(volume_id);
let iscsiName = driver.buildIscsiName(name);
response = await httpClient.GetLunUUIDByName(iscsiName);
await httpClient.ExpandISCSILun(response, capacity_bytes);

View File

@ -2,41 +2,29 @@ const _ = require("lodash");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const LocalCliExecClient =
require("../../utils/zfs_local_exec_client").LocalCliClient;
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
const registry = require("../../utils/registry");
const SshClient = require("../../utils/ssh").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
const Handlebars = require("handlebars");
const ISCSI_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:iscsi_assets_name";
const NVMEOF_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:nvmeof_assets_name";
const __REGISTRY_NS__ = "ControllerZfsGenericDriver";
class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
getExecClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
if (this.options.sshConnection) {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection,
});
} else {
return new LocalCliExecClient({
logger: this.ctx.logger,
});
}
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection,
});
});
}
async getZetabyte() {
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
const execClient = this.getExecClient();
const options = {};
if (this.options.sshConnection) {
options.executor = new ZfsSshProcessManager(execClient);
} else {
options.executor = execClient;
}
options.executor = new ZfsSshProcessManager(execClient);
options.idempotent = true;
if (
@ -67,7 +55,6 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
case "zfs-generic-smb":
return "filesystem";
case "zfs-generic-iscsi":
case "zfs-generic-nvmeof":
return "volume";
default:
throw new Error("unknown driver: " + this.ctx.args.driver);
@ -177,28 +164,28 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
};
return volume_context;
case "zfs-generic-iscsi": {
case "zfs-generic-iscsi":
let basename;
let assetName;
let iscsiName;
if (this.options.iscsi.nameTemplate) {
assetName = Handlebars.compile(this.options.iscsi.nameTemplate)({
iscsiName = Handlebars.compile(this.options.iscsi.nameTemplate)({
name: call.request.name,
parameters: call.request.parameters,
});
} else {
assetName = zb.helpers.extractLeafName(datasetName);
iscsiName = zb.helpers.extractLeafName(datasetName);
}
if (this.options.iscsi.namePrefix) {
assetName = this.options.iscsi.namePrefix + assetName;
iscsiName = this.options.iscsi.namePrefix + iscsiName;
}
if (this.options.iscsi.nameSuffix) {
assetName += this.options.iscsi.nameSuffix;
iscsiName += this.options.iscsi.nameSuffix;
}
assetName = assetName.toLowerCase();
iscsiName = iscsiName.toLowerCase();
let extentDiskName = "zvol/" + datasetName;
@ -218,22 +205,6 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
basename = this.options.iscsi.shareStrategyTargetCli.basename;
let setAttributesText = "";
let setAuthText = "";
let setBlockAttributesText = "";
if (this.options.iscsi.shareStrategyTargetCli.block) {
if (this.options.iscsi.shareStrategyTargetCli.block.attributes) {
for (const attributeName in this.options.iscsi
.shareStrategyTargetCli.block.attributes) {
const attributeValue =
this.options.iscsi.shareStrategyTargetCli.block.attributes[
attributeName
];
setBlockAttributesText += "\n";
setBlockAttributesText += `set attribute ${attributeName}=${attributeValue}`;
}
}
}
if (this.options.iscsi.shareStrategyTargetCli.tpg) {
if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) {
for (const attributeName in this.options.iscsi
@ -268,22 +239,20 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
`
# create target
cd /iscsi
create ${basename}:${assetName}
create ${basename}:${iscsiName}
# setup tpg
cd /iscsi/${basename}:${assetName}/tpg1
cd /iscsi/${basename}:${iscsiName}/tpg1
${setAttributesText}
${setAuthText}
# create extent
cd /backstores/block
create ${assetName} /dev/${extentDiskName}
cd /backstores/block/${assetName}
${setBlockAttributesText}
create ${iscsiName} /dev/${extentDiskName}
# add extent to target/tpg
cd /iscsi/${basename}:${assetName}/tpg1/luns
create /backstores/block/${assetName}
cd /iscsi/${basename}:${iscsiName}/tpg1/luns
create /backstores/block/${iscsiName}
`
);
},
@ -302,12 +271,12 @@ create /backstores/block/${assetName}
}
// iqn = target
let iqn = basename + ":" + assetName;
let iqn = basename + ":" + iscsiName;
this.ctx.logger.info("iqn: " + iqn);
// store this off to make delete process more bullet proof
await zb.zfs.set(datasetName, {
[ISCSI_ASSETS_NAME_PROPERTY_NAME]: assetName,
[ISCSI_ASSETS_NAME_PROPERTY_NAME]: iscsiName,
});
volume_context = {
@ -321,231 +290,6 @@ create /backstores/block/${assetName}
lun: 0,
};
return volume_context;
}
case "zfs-generic-nvmeof": {
let basename;
let assetName;
if (this.options.nvmeof.nameTemplate) {
assetName = Handlebars.compile(this.options.nvmeof.nameTemplate)({
name: call.request.name,
parameters: call.request.parameters,
});
} else {
assetName = zb.helpers.extractLeafName(datasetName);
}
if (this.options.nvmeof.namePrefix) {
assetName = this.options.nvmeof.namePrefix + assetName;
}
if (this.options.nvmeof.nameSuffix) {
assetName += this.options.nvmeof.nameSuffix;
}
assetName = assetName.toLowerCase();
let extentDiskName = "zvol/" + datasetName;
/**
* the 63-character limit is a FreeBSD limitation
* https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
*/
//if (extentDiskName.length > 63) {
// throw new GrpcError(
// grpc.status.FAILED_PRECONDITION,
// `extent disk name cannot exceed 63 characters: ${extentDiskName}`
// );
//}
let namespace = 1;
switch (this.options.nvmeof.shareStrategy) {
case "nvmetCli":
{
basename = this.options.nvmeof.shareStrategyNvmetCli.basename;
let savefile = _.get(
this.options,
"nvmeof.shareStrategyNvmetCli.configPath",
""
);
if (savefile) {
savefile = `savefile=${savefile}`;
}
let setSubsystemAttributesText = "";
if (this.options.nvmeof.shareStrategyNvmetCli.subsystem) {
if (
this.options.nvmeof.shareStrategyNvmetCli.subsystem.attributes
) {
for (const attributeName in this.options.nvmeof
.shareStrategyNvmetCli.subsystem.attributes) {
const attributeValue =
this.options.nvmeof.shareStrategyNvmetCli.subsystem
.attributes[attributeName];
setSubsystemAttributesText += "\n";
setSubsystemAttributesText += `set attr ${attributeName}=${attributeValue}`;
}
}
}
let portCommands = "";
this.options.nvmeof.shareStrategyNvmetCli.ports.forEach(
(port) => {
portCommands += `
cd /ports/${port}/subsystems
create ${basename}:${assetName}
`;
}
);
await GeneralUtils.retry(
3,
2000,
async () => {
await this.nvmetCliCommand(
`
# create subsystem
cd /subsystems
create ${basename}:${assetName}
cd ${basename}:${assetName}
${setSubsystemAttributesText}
# create subsystem namespace
cd namespaces
create ${namespace}
cd ${namespace}
set device path=/dev/${extentDiskName}
enable
# associate subsystem/target to port(al)
${portCommands}
saveconfig ${savefile}
`
);
},
{
retryCondition: (err) => {
if (err.stdout && err.stdout.includes("Ran out of input")) {
return true;
}
return false;
},
}
);
}
break;
case "spdkCli":
{
basename = this.options.nvmeof.shareStrategySpdkCli.basename;
let bdevAttributesText = "";
if (this.options.nvmeof.shareStrategySpdkCli.bdev) {
if (this.options.nvmeof.shareStrategySpdkCli.bdev.attributes) {
for (const attributeName in this.options.nvmeof
.shareStrategySpdkCli.bdev.attributes) {
const attributeValue =
this.options.nvmeof.shareStrategySpdkCli.bdev.attributes[
attributeName
];
bdevAttributesText += `${attributeName}=${attributeValue}`;
}
}
}
let subsystemAttributesText = "";
if (this.options.nvmeof.shareStrategySpdkCli.subsystem) {
if (
this.options.nvmeof.shareStrategySpdkCli.subsystem.attributes
) {
for (const attributeName in this.options.nvmeof
.shareStrategySpdkCli.subsystem.attributes) {
const attributeValue =
this.options.nvmeof.shareStrategySpdkCli.subsystem
.attributes[attributeName];
subsystemAttributesText += `${attributeName}=${attributeValue}`;
}
}
}
let listenerCommands = `cd /nvmf/subsystem/${basename}:${assetName}/listen_addresses\n`;
this.options.nvmeof.shareStrategySpdkCli.listeners.forEach(
(listener) => {
let listenerAttributesText = "";
for (const attributeName in listener) {
const attributeValue = listener[attributeName];
listenerAttributesText += ` ${attributeName}=${attributeValue} `;
}
listenerCommands += `
create ${listenerAttributesText}
`;
}
);
await GeneralUtils.retry(
3,
2000,
async () => {
await this.spdkCliCommand(
`
# create bdev
cd /bdevs/${this.options.nvmeof.shareStrategySpdkCli.bdev.type}
create filename=/dev/${extentDiskName} name=${basename}:${assetName} ${bdevAttributesText}
# create subsystem
cd /nvmf/subsystem
create nqn=${basename}:${assetName} ${subsystemAttributesText}
cd ${basename}:${assetName}
# create namespace
cd /nvmf/subsystem/${basename}:${assetName}/namespaces
create bdev_name=${basename}:${assetName} nsid=${namespace}
# add listener
${listenerCommands}
cd /
save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
`
);
},
{
retryCondition: (err) => {
if (err.stdout && err.stdout.includes("Ran out of input")) {
return true;
}
return false;
},
}
);
}
break;
default:
break;
}
// iqn = target
let nqn = basename + ":" + assetName;
this.ctx.logger.info("nqn: " + nqn);
// store this off to make delete process more bullet proof
await zb.zfs.set(datasetName, {
[NVMEOF_ASSETS_NAME_PROPERTY_NAME]: assetName,
});
volume_context = {
node_attach_driver: "nvmeof",
transport: this.options.nvmeof.transport || "",
transports: this.options.nvmeof.transports
? this.options.nvmeof.transports.join(",")
: "",
nqn,
nsid: namespace,
};
return volume_context;
}
default:
throw new GrpcError(
@ -623,9 +367,9 @@ save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
}
break;
case "zfs-generic-iscsi": {
case "zfs-generic-iscsi":
let basename;
let assetName;
let iscsiName;
// Delete iscsi assets
try {
@ -642,23 +386,23 @@ save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
properties = properties[datasetName];
this.ctx.logger.debug("zfs props data: %j", properties);
assetName = properties[ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
iscsiName = properties[ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
if (zb.helpers.isPropertyValueSet(assetName)) {
if (zb.helpers.isPropertyValueSet(iscsiName)) {
//do nothing
} else {
assetName = zb.helpers.extractLeafName(datasetName);
iscsiName = zb.helpers.extractLeafName(datasetName);
if (this.options.iscsi.namePrefix) {
assetName = this.options.iscsi.namePrefix + assetName;
iscsiName = this.options.iscsi.namePrefix + iscsiName;
}
if (this.options.iscsi.nameSuffix) {
assetName += this.options.iscsi.nameSuffix;
iscsiName += this.options.iscsi.nameSuffix;
}
}
assetName = assetName.toLowerCase();
iscsiName = iscsiName.toLowerCase();
switch (this.options.iscsi.shareStrategy) {
case "targetCli":
basename = this.options.iscsi.shareStrategyTargetCli.basename;
@ -670,11 +414,11 @@ save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
`
# delete target
cd /iscsi
delete ${basename}:${assetName}
delete ${basename}:${iscsiName}
# delete extent
cd /backstores/block
delete ${assetName}
delete ${iscsiName}
`
);
},
@ -693,132 +437,6 @@ delete ${assetName}
break;
}
break;
}
case "zfs-generic-nvmeof": {
let basename;
let assetName;
// Delete nvmeof assets
try {
properties = await zb.zfs.get(datasetName, [
NVMEOF_ASSETS_NAME_PROPERTY_NAME,
]);
} catch (err) {
if (err.toString().includes("dataset does not exist")) {
return;
}
throw err;
}
properties = properties[datasetName];
this.ctx.logger.debug("zfs props data: %j", properties);
assetName = properties[NVMEOF_ASSETS_NAME_PROPERTY_NAME].value;
if (zb.helpers.isPropertyValueSet(assetName)) {
//do nothing
} else {
assetName = zb.helpers.extractLeafName(datasetName);
if (this.options.nvmeof.namePrefix) {
assetName = this.options.nvmeof.namePrefix + assetName;
}
if (this.options.nvmeof.nameSuffix) {
assetName += this.options.nvmeof.nameSuffix;
}
}
assetName = assetName.toLowerCase();
switch (this.options.nvmeof.shareStrategy) {
case "nvmetCli":
{
basename = this.options.nvmeof.shareStrategyNvmetCli.basename;
let savefile = _.get(
this.options,
"nvmeof.shareStrategyNvmetCli.configPath",
""
);
if (savefile) {
savefile = `savefile=${savefile}`;
}
let portCommands = "";
this.options.nvmeof.shareStrategyNvmetCli.ports.forEach(
(port) => {
portCommands += `
cd /ports/${port}/subsystems
delete ${basename}:${assetName}
`;
}
);
await GeneralUtils.retry(
3,
2000,
async () => {
await this.nvmetCliCommand(
`
# delete subsystem from port
${portCommands}
# delete subsystem
cd /subsystems
delete ${basename}:${assetName}
saveconfig ${savefile}
`
);
},
{
retryCondition: (err) => {
if (err.stdout && err.stdout.includes("Ran out of input")) {
return true;
}
return false;
},
}
);
}
break;
case "spdkCli":
{
basename = this.options.nvmeof.shareStrategySpdkCli.basename;
await GeneralUtils.retry(
3,
2000,
async () => {
await this.spdkCliCommand(
`
# delete subsystem
cd /nvmf/subsystem/
delete subsystem_nqn=${basename}:${assetName}
# delete bdev
cd /bdevs/${this.options.nvmeof.shareStrategySpdkCli.bdev.type}
delete name=${basename}:${assetName}
cd /
save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
`
);
},
{
retryCondition: (err) => {
if (err.stdout && err.stdout.includes("Ran out of input")) {
return true;
}
return false;
},
}
);
}
break;
default:
break;
}
break;
}
default:
throw new GrpcError(
@ -859,18 +477,18 @@ save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
let command = "sh";
let args = ["-c"];
let cliArgs = ["targetcli"];
let targetCliArgs = ["targetcli"];
if (
_.get(this.options, "iscsi.shareStrategyTargetCli.sudoEnabled", false)
) {
cliArgs.unshift("sudo");
targetCliArgs.unshift("sudo");
}
let cliCommand = [];
cliCommand.push(`echo "${data}"`.trim());
cliCommand.push("|");
cliCommand.push(cliArgs.join(" "));
args.push("'" + cliCommand.join(" ") + "'");
let targetCliCommand = [];
targetCliCommand.push(`echo "${data}"`.trim());
targetCliCommand.push("|");
targetCliCommand.push(targetCliArgs.join(" "));
args.push("'" + targetCliCommand.join(" ") + "'");
let logCommandTmp = command + " " + args.join(" ");
let logCommand = "";
@ -909,151 +527,6 @@ save_config filename=${this.options.nvmeof.shareStrategySpdkCli.configPath}
}
return response;
}
async nvmetCliCommand(data) {
const execClient = this.getExecClient();
const driver = this;
if (
_.get(
this.options,
"nvmeof.shareStrategyNvmetCli.configIsImportedFilePath"
)
) {
try {
let response = await execClient.exec(
execClient.buildCommand("test", [
"-f",
_.get(
this.options,
"nvmeof.shareStrategyNvmetCli.configIsImportedFilePath"
),
])
);
} catch (err) {
throw new Error("nvmet has not been fully configured");
}
}
data = data.trim();
let command = "sh";
let args = ["-c"];
let cliArgs = [
_.get(
this.options,
"nvmeof.shareStrategyNvmetCli.nvmetcliPath",
"nvmetcli"
),
];
if (
_.get(this.options, "nvmeof.shareStrategyNvmetCli.sudoEnabled", false)
) {
cliArgs.unshift("sudo");
}
let cliCommand = [];
cliCommand.push(`echo "${data}"`.trim());
cliCommand.push("|");
cliCommand.push(cliArgs.join(" "));
args.push("'" + cliCommand.join(" ") + "'");
let logCommandTmp = command + " " + args.join(" ");
let logCommand = "";
logCommandTmp.split("\n").forEach((line) => {
if (line.startsWith("set auth password=")) {
logCommand += "set auth password=<redacted>";
} else if (line.startsWith("set auth mutual_password=")) {
logCommand += "set auth mutual_password=<redacted>";
} else {
logCommand += line;
}
logCommand += "\n";
});
driver.ctx.logger.verbose("nvmetCLI command: " + logCommand);
//process.exit(0);
// https://github.com/democratic-csi/democratic-csi/issues/127
// https://bugs.launchpad.net/ubuntu/+source/python-configshell-fb/+bug/1776761
// can apply the linked patch with some modifications to overcome the
// KeyErrors or we can simply start a fake tty which does not seem to have
// a detrimental effect, only affects Ubuntu 18.04 and older
let options = {
pty: true,
};
let response = await execClient.exec(
execClient.buildCommand(command, args),
options
);
driver.ctx.logger.verbose("nvmetCLI response: " + JSON.stringify(response));
if (response.code != 0) {
throw response;
}
return response;
}
async spdkCliCommand(data) {
const execClient = this.getExecClient();
const driver = this;
data = data.trim();
let command = "sh";
let args = ["-c"];
let cliArgs = [
_.get(this.options, "nvmeof.shareStrategySpdkCli.spdkcliPath", "spdkcli"),
];
if (_.get(this.options, "nvmeof.shareStrategySpdkCli.sudoEnabled", false)) {
cliArgs.unshift("sudo");
}
let cliCommand = [];
cliCommand.push(`echo "${data}"`.trim());
cliCommand.push("|");
cliCommand.push(cliArgs.join(" "));
args.push("'" + cliCommand.join(" ") + "'");
let logCommandTmp = command + " " + args.join(" ");
let logCommand = "";
logCommandTmp.split("\n").forEach((line) => {
if (line.startsWith("set auth password=")) {
logCommand += "set auth password=<redacted>";
} else if (line.startsWith("set auth mutual_password=")) {
logCommand += "set auth mutual_password=<redacted>";
} else {
logCommand += line;
}
logCommand += "\n";
});
driver.ctx.logger.verbose("spdkCLI command: " + logCommand);
//process.exit(0);
// https://github.com/democratic-csi/democratic-csi/issues/127
// https://bugs.launchpad.net/ubuntu/+source/python-configshell-fb/+bug/1776761
// can apply the linked patch with some modifications to overcome the
// KeyErrors or we can simply start a fake tty which does not seem to have
// a detrimental effect, only affects Ubuntu 18.04 and older
let options = {
pty: true,
};
let response = await execClient.exec(
execClient.buildCommand(command, args),
options
);
driver.ctx.logger.verbose("spdkCLI response: " + JSON.stringify(response));
if (response.code != 0) {
throw response;
}
return response;
}
}
module.exports.ControllerZfsGenericDriver = ControllerZfsGenericDriver;

View File

@ -2,8 +2,8 @@ const _ = require("lodash");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const LocalCliExecClient =
require("../../utils/zfs_local_exec_client").LocalCliClient;
const LocalCliExecClient = require("./exec").LocalCliClient;
const registry = require("../../utils/registry");
const { Zetabyte } = require("../../utils/zfs");
const ZFS_ASSET_NAME_PROPERTY_NAME = "zfs_asset_name";
@ -32,7 +32,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
}
getExecClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return new LocalCliExecClient({
logger: this.ctx.logger,
});
@ -40,7 +40,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
}
async getZetabyte() {
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
const execClient = this.getExecClient();
const options = {};
@ -109,16 +109,15 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
*
* @returns Array
*/
getAccessModes(capability) {
getAccessModes() {
const driverZfsResourceType = this.getDriverZfsResourceType();
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
const driverZfsResourceType = this.getDriverZfsResourceType();
switch (driverZfsResourceType) {
case "filesystem":
access_modes = [
return [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
@ -128,9 +127,8 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
case "volume":
access_modes = [
return [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
@ -140,17 +138,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
/**

View File

@ -39,7 +39,7 @@ const MAX_ZVOL_NAME_LENGTH_CACHE_KEY = "controller-zfs:max_zvol_name_length";
* - async setZetabyteCustomOptions(options) // optional
* - getDriverZfsResourceType() // return "filesystem" or "volume"
* - getFSTypes() // optional
* - getAccessModes(capability) // optional
* - getAccessModes() // optional
* - async getAccessibleTopology() // optional
* - async createShare(call, datasetName) // return appropriate volume_context for Node operations
* - async deleteShare(call, datasetName) // no return expected
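To make the contract concrete, a hedged sketch of a minimal subclass wiring up the required methods listed above (bodies are illustrative placeholders, optional methods omitted):
// hedged sketch: smallest useful ControllerZfsBaseDriver subclass
class ControllerZfsExampleDriver extends ControllerZfsBaseDriver {
  getExecClient() {
    // return an exec client (ssh or local) for running zfs commands
  }
  async getZetabyte() {
    // return a Zetabyte instance bound to the exec client
  }
  getDriverZfsResourceType() {
    return "filesystem"; // or "volume" for zvol-backed drivers
  }
  async createShare(call, datasetName) {
    // return appropriate volume_context for Node operations,
    // e.g. { node_attach_driver: "nfs", server: "...", share: "..." }
  }
  async deleteShare(call, datasetName) {
    // tear down share assets; no return expected
  }
}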
@ -207,16 +207,11 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
}
}
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
getAccessModes() {
const driverZfsResourceType = this.getDriverZfsResourceType();
switch (driverZfsResourceType) {
case "filesystem":
access_modes = [
return [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
@ -226,9 +221,8 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
case "volume":
access_modes = [
return [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
@ -237,17 +231,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
];
break;
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
assertCapabilities(capabilities) {
@ -272,11 +256,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
return false;
}
if (
!this.getAccessModes(capability).includes(
capability.access_mode.mode
)
) {
if (!this.getAccessModes().includes(capability.access_mode.mode)) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
@ -293,11 +273,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
}
}
if (
!this.getAccessModes(capability).includes(
capability.access_mode.mode
)
) {
if (!this.getAccessModes().includes(capability.access_mode.mode)) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
@ -617,9 +593,9 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
);
}
return super.Probe(...arguments);
return { ready: { value: true } };
} else {
return super.Probe(...arguments);
return { ready: { value: true } };
}
}
@ -644,7 +620,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
let name = call.request.name;
let volume_id = await driver.getVolumeIdFromCall(call);
let volume_id = await driver.getVolumeIdFromName(name);
let volume_content_source = call.request.volume_content_source;
if (!datasetParentName) {
@ -654,6 +630,13 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (
call.request.volume_capabilities &&
call.request.volume_capabilities.length > 0
@ -1190,30 +1173,11 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
// this should be already set, but when coming from a volume source
// it may not match that of the source
// TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured
properties.volsize = capacity_bytes;
// dedup
// on, off, verify
// zfs set dedup=on tank/home
// restore default must use the below
// zfs inherit [-rS] property filesystem|volume|snapshot…
if (
(typeof this.options.zfs.zvolDedup === "string" ||
this.options.zfs.zvolDedup instanceof String) &&
this.options.zfs.zvolDedup.length > 0
) {
properties.dedup = this.options.zfs.zvolDedup;
}
// compression
// lz4, gzip-9, etc
if (
(typeof this.options.zfs.zvolCompression === "string" ||
this.options.zfs.zvolCompression instanceof String) &&
this.options.zfs.zvolCompression.length > 0
) {
properties.compression = this.options.zfs.zvolCompression;
}
//dedup
//compression
if (setProps) {
await zb.zfs.set(datasetName, properties);
@ -1316,17 +1280,6 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
driver.ctx.logger.debug("dataset properties: %j", properties);
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
// remove share resources
await this.deleteShare(call, datasetName);

View File

@ -12,7 +12,6 @@ const {
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
const { ControllerSmbClientDriver } = require("./controller-smb-client");
const { ControllerLustreClientDriver } = require("./controller-lustre-client");
const { ControllerObjectiveFSDriver } = require("./controller-objectivefs");
const { ControllerSynologyDriver } = require("./controller-synology");
const { NodeManualDriver } = require("./node-manual");
@ -36,7 +35,6 @@ function factory(ctx, options) {
case "zfs-generic-nfs":
case "zfs-generic-smb":
case "zfs-generic-iscsi":
case "zfs-generic-nvmeof":
return new ControllerZfsGenericDriver(ctx, options);
case "zfs-local-dataset":
case "zfs-local-zvol":
@ -51,8 +49,6 @@ function factory(ctx, options) {
return new ControllerLocalHostpathDriver(ctx, options);
case "lustre-client":
return new ControllerLustreClientDriver(ctx, options);
case "objectivefs":
return new ControllerObjectiveFSDriver(ctx, options);
case "node-manual":
return new NodeManualDriver(ctx, options);
default:

View File

@ -4,6 +4,7 @@ const { CsiBaseDriver } = require("../index");
const HttpClient = require("./http").Client;
const TrueNASApiClient = require("./http/api").Api;
const { Zetabyte } = require("../../utils/zfs");
const registry = require("../../utils/registry");
const GeneralUtils = require("../../utils/general");
const Handlebars = require("handlebars");
@ -155,7 +156,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
* @returns
*/
async getZetabyte() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
return registry.get(`${__REGISTRY_NS__}:zb`, () => {
return new Zetabyte({
executor: {
spawn: function () {
@ -182,17 +183,8 @@ class FreeNASApiDriver extends CsiBaseDriver {
const apiVersion = httpClient.getApiVersion();
const zb = await this.getZetabyte();
const truenasVersion = semver.coerce(
await httpApiClient.getSystemVersionMajorMinor(),
{ loose: true }
await httpApiClient.getSystemVersionMajorMinor()
);
if (!truenasVersion) {
throw new GrpcError(
grpc.status.UNKNOWN,
`unable to detect TrueNAS version`
);
}
const isScale = await httpApiClient.getIsScale();
let volume_context;
@ -273,11 +265,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
break;
}
if (isScale && semver.satisfies(truenasVersion, ">=23.10")) {
delete share.quiet;
delete share.nfs_quiet;
}
if (isScale && semver.satisfies(truenasVersion, ">=22.12")) {
share.path = share.paths[0];
delete share.paths;
@ -693,7 +680,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
// According to RFC3720, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.'
// https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name
// https://tools.ietf.org/html/rfc3720
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
iscsiName = iscsiName.toLowerCase();
let extentDiskName = "zvol/" + datasetName;
@ -711,14 +697,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
);
}
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
if (isScale && iscsiName.length > 64) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`extent name cannot exceed 64 characters: ${iscsiName}`
);
}
this.ctx.logger.info(
"FreeNAS creating iscsi assets with name: " + iscsiName
);
@ -2016,7 +1994,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
}
async getHttpClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_client`, () => {
return registry.get(`${__REGISTRY_NS__}:http_client`, () => {
const client = new HttpClient(this.options.httpConnection);
client.logger = this.ctx.logger;
client.setApiVersion(2); // requires version 2
@ -2033,55 +2011,12 @@ class FreeNASApiDriver extends CsiBaseDriver {
}
async getTrueNASHttpApiClient() {
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
return registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
const httpClient = await this.getHttpClient();
return new TrueNASApiClient(httpClient, this.ctx.cache);
});
}
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
const driverZfsResourceType = this.getDriverZfsResourceType();
switch (driverZfsResourceType) {
case "filesystem":
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
break;
case "volume":
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
];
break;
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
assertCapabilities(capabilities) {
const driverZfsResourceType = this.getDriverZfsResourceType();
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
@ -2105,9 +2040,16 @@ class FreeNASApiDriver extends CsiBaseDriver {
}
if (
!this.getAccessModes(capability).includes(
capability.access_mode.mode
)
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
@ -2128,9 +2070,15 @@ class FreeNASApiDriver extends CsiBaseDriver {
}
if (
!this.getAccessModes(capability).includes(
capability.access_mode.mode
)
![
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
].includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
@ -2195,15 +2143,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
);
}
try {
await httpApiClient.getSystemVersion();
} catch (err) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`TrueNAS api is unavailable: ${String(err)}`
);
}
if (!(await httpApiClient.getIsScale())) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
@ -2211,9 +2150,9 @@ class FreeNASApiDriver extends CsiBaseDriver {
);
}
return super.Probe(...arguments);
return { ready: { value: true } };
} else {
return super.Probe(...arguments);
return { ready: { value: true } };
}
}
@ -2238,7 +2177,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
let name = call.request.name;
let volume_id = await driver.getVolumeIdFromCall(call);
let volume_id = await driver.getVolumeIdFromName(name);
let volume_content_source = call.request.volume_content_source;
let minimum_volume_size = await driver.getMinimumVolumeSize();
let default_required_bytes = 1073741824; // 1 GiB
@ -2250,6 +2189,13 @@ class FreeNASApiDriver extends CsiBaseDriver {
);
}
if (!name) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
if (
call.request.volume_capabilities &&
call.request.volume_capabilities.length > 0
@ -2925,30 +2871,11 @@ class FreeNASApiDriver extends CsiBaseDriver {
// this should be already set, but when coming from a volume source
// it may not match that of the source
// TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured
properties.volsize = capacity_bytes;
// dedup
// on, off, verify
// zfs set dedup=on tank/home
// restore default must use the below
// zfs inherit [-rS] property filesystem|volume|snapshot…
if (
(typeof this.options.zfs.zvolDedup === "string" ||
this.options.zfs.zvolDedup instanceof String) &&
this.options.zfs.zvolDedup.length > 0
) {
properties.dedup = this.options.zfs.zvolDedup;
}
// compression
// lz4, gzip-9, etc
if (
(typeof this.options.zfs.zvolCompression === "string" ||
this.options.zfs.zvolCompression instanceof String) &&
this.options.zfs.zvolCompression.length > 0
) {
properties.compression = this.options.zfs.zvolCompression;
}
//dedup
//compression
if (setProps) {
await httpApiClient.DatasetSet(datasetName, properties);
@ -3047,17 +2974,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
driver.ctx.logger.debug("dataset properties: %j", properties);
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
// remove share resources
await this.deleteShare(call, datasetName);

View File

@ -1,4 +1,4 @@
const registry = require("../../../utils/registry");
const { sleep, stringify } = require("../../../utils/general");
const { Zetabyte } = require("../../../utils/zfs");
@ -22,7 +22,7 @@ class Api {
* @returns
*/
async getZetabyte() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
return registry.get(`${__REGISTRY_NS__}:zb`, () => {
return new Zetabyte({
executor: {
spawn: function () {
@ -119,11 +119,7 @@ class Api {
return 2;
}
if (systemVersion.v1) {
return 1;
}
return 2;
return 1;
}
async getIsFreeNAS() {
@ -243,7 +239,7 @@ class Api {
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
*/
try {
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
response = await httpClient.get(endpoint);
versionResponses.v2 = response;
if (response.statusCode == 200) {
versionInfo.v2 = response.body;
@ -267,7 +263,7 @@ class Api {
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
*/
try {
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
response = await httpClient.get(endpoint);
versionResponses.v1 = response;
if (response.statusCode == 200 && IsJsonString(response.body)) {
versionInfo.v1 = response.body;
@ -702,16 +698,15 @@ class Api {
// wait for job to finish
do {
currentTime = Date.now() / 1000;
if (timeout > 0 && currentTime > startTime + timeout) {
throw new Error("timeout waiting for job to complete");
}
if (job) {
await sleep(check_interval);
}
job = await this.CoreGetJobs({ id: job_id });
job = job[0];
currentTime = Date.now() / 1000;
if (timeout > 0 && currentTime > startTime + timeout) {
throw new Error("timeout waiting for job to complete");
}
} while (!["SUCCESS", "ABORTED", "FAILED"].includes(job.state));
return job;
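For illustration, a hedged sketch of calling this job-polling helper (the enclosing method name and the job-producing call are assumptions, not shown in this hunk):
// hedged usage sketch: wait up to 60 seconds for an async middleware job;
// the loop above polls CoreGetJobs until SUCCESS, ABORTED, or FAILED
const job_id = await api.SomeJobReturningCall(); // hypothetical job-producing call
const job = await api.CoreWaitForJob(job_id, 60); // method name assumed
if (job.state != "SUCCESS") {
  throw new Error(`job finished in state: ${job.state}`);
}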

View File

@ -12,7 +12,7 @@ class Client {
// default to v1.0 for now
if (!this.options.apiVersion) {
this.options.apiVersion = 2;
this.options.apiVersion = 1;
}
}
@ -131,33 +131,25 @@ class Client {
delete options.httpAgent;
delete options.httpsAgent;
let duration = parseFloat(
Math.round((_.get(response, "duration", 0) + Number.EPSILON) * 100) /
100 /
1000
).toFixed(2);
this.logger.debug("FREENAS HTTP REQUEST DETAILS: " + stringify(options));
this.logger.debug("FREENAS HTTP REQUEST DURATION: " + duration + "s");
this.logger.debug("FREENAS HTTP REQUEST: " + stringify(options));
this.logger.debug("FREENAS HTTP ERROR: " + error);
this.logger.debug(
"FREENAS HTTP RESPONSE STATUS CODE: " + _.get(response, "statusCode", "")
"FREENAS HTTP STATUS: " + _.get(response, "statusCode", "")
);
this.logger.debug(
"FREENAS HTTP RESPONSE HEADERS: " +
stringify(_.get(response, "headers", ""))
"FREENAS HTTP HEADERS: " + stringify(_.get(response, "headers", ""))
);
this.logger.debug("FREENAS HTTP RESPONSE BODY: " + stringify(body));
this.logger.debug("FREENAS HTTP BODY: " + stringify(body));
}
async get(endpoint, data, options = {}) {
async get(endpoint, data) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
const options = client.getRequestCommonOptions();
options.method = "GET";
options.url = this.getBaseURL() + endpoint;
options.params = data;
@ -172,14 +164,14 @@ class Client {
});
}
async post(endpoint, data, options = {}) {
async post(endpoint, data) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
const options = client.getRequestCommonOptions();
options.method = "POST";
options.url = this.getBaseURL() + endpoint;
options.data = data;
@ -195,14 +187,14 @@ class Client {
});
}
async put(endpoint, data, options = {}) {
async put(endpoint, data) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
const options = client.getRequestCommonOptions();
options.method = "PUT";
options.url = this.getBaseURL() + endpoint;
options.data = data;
@ -218,14 +210,14 @@ class Client {
});
}
async delete(endpoint, data, options = {}) {
async delete(endpoint, data) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
options = { ...client.getRequestCommonOptions(), ...options };
const options = client.getRequestCommonOptions();
options.method = "DELETE";
options.url = this.getBaseURL() + endpoint;
options.data = data;
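The variant of these signatures that accepts a per-request options object spreads it over the client defaults; a hedged usage sketch (constructor options and endpoint illustrative):
// hedged usage sketch: per-request timeout merged over client defaults,
// mirroring the `{ timeout: 5 * 1000 }` version-probe call sites above
const client = new Client({ apiVersion: 2 /* connection options elided */ });
const response = await client.get("/system/info", null, { timeout: 5 * 1000 });
if (response.statusCode == 200) {
  console.log(response.body);
}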

View File

@ -1,7 +1,8 @@
const _ = require("lodash");
const { ControllerZfsBaseDriver } = require("../controller-zfs");
const { GrpcError, grpc } = require("../../utils/grpc");
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
const registry = require("../../utils/registry");
const SshClient = require("../../utils/ssh").SshClient;
const HttpClient = require("./http").Client;
const TrueNASApiClient = require("./http/api").Api;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
@ -27,36 +28,8 @@ const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
const __REGISTRY_NS__ = "FreeNASSshDriver";
class FreeNASSshDriver extends ControllerZfsBaseDriver {
/**
* Ensure sane options are used etc
* true = ready
* false = not ready, but progressing towards ready
* throw error = faulty setup
*
* @param {*} call
*/
async Probe(call) {
const driver = this;
if (driver.ctx.args.csiMode.includes("controller")) {
const httpApiClient = await driver.getTrueNASHttpApiClient();
try {
await httpApiClient.getSystemVersion();
} catch (err) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`TrueNAS api is unavailable: ${String(err)}`
);
}
return super.Probe(...arguments);
} else {
return super.Probe(...arguments);
}
}
getExecClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection,
@ -65,7 +38,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
}
async getZetabyte() {
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
const sshClient = this.getExecClient();
const options = {};
options.executor = new ZfsSshProcessManager(sshClient);
@ -125,7 +98,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
async getHttpClient(autoDetectVersion = true) {
const autodetectkey = autoDetectVersion === true ? 1 : 0;
return this.ctx.registry.getAsync(
return registry.getAsync(
`${__REGISTRY_NS__}:http_client:autoDetectVersion_${autodetectkey}`,
async () => {
const client = new HttpClient(this.options.httpConnection);
@ -142,7 +115,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
}
async getTrueNASHttpApiClient() {
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
return registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
const httpClient = await this.getHttpClient();
return new TrueNASApiClient(httpClient, this.ctx.cache);
});
@ -258,17 +231,8 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
const apiVersion = httpClient.getApiVersion();
const zb = await this.getZetabyte();
const truenasVersion = semver.coerce(
await httpApiClient.getSystemVersionMajorMinor(),
{ loose: true }
await httpApiClient.getSystemVersionMajorMinor()
);
if (!truenasVersion) {
throw new GrpcError(
grpc.status.UNKNOWN,
`unable to detect TrueNAS version`
);
}
const isScale = await httpApiClient.getIsScale();
let volume_context;
@ -350,11 +314,6 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
break;
}
if (isScale && semver.satisfies(truenasVersion, ">=23.10")) {
delete share.quiet;
delete share.nfs_quiet;
}
if (isScale && semver.satisfies(truenasVersion, ">=22.12")) {
share.path = share.paths[0];
delete share.paths;
@ -769,7 +728,6 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
// According to RFC3720, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.'
// https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name
// https://tools.ietf.org/html/rfc3720
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
iscsiName = iscsiName.toLowerCase();
let extentDiskName = "zvol/" + datasetName;
@ -784,15 +742,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
if (extentDiskName.length > maxZvolNameLength) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`extent disk name cannot exceed ${maxZvolNameLength} characters: ${extentDiskName}`
);
}
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
if (isScale && iscsiName.length > 64) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`extent name cannot exceed 64 characters: ${iscsiName}`
`extent disk name cannot exceed ${maxZvolNameLength} characters: ${extentDiskName}`
);
}
@ -2033,9 +1983,6 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
let iscsiName =
properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
// name correlates to the extent NOT the target
let kName = iscsiName.replaceAll(".", "_");
/**
* command = execClient.buildCommand("systemctl", ["reload", "scst"]);
* does not help ^
@ -2048,11 +1995,10 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
*
* midclt resync_lun_size_for_zvol tank/foo/bar
* works on SCALE only ^
*
*/
command = execClient.buildCommand("sh", [
"-c",
`"echo 1 > /sys/kernel/scst_tgt/devices/${kName}/resync_size"`,
`echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size`,
]);
reload = true;
} else {
@ -2123,11 +2069,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
return 2;
}
if (systemVersion.v1) {
return 1;
}
return 2;
return 1;
}
async getIsFreeNAS() {
@ -2252,7 +2194,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
*/
try {
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
response = await httpClient.get(endpoint);
versionResponses.v2 = response;
if (response.statusCode == 200) {
versionInfo.v2 = response.body;
@ -2276,7 +2218,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
*/
try {
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
response = await httpClient.get(endpoint);
versionResponses.v1 = response;
if (response.statusCode == 200 && IsJsonString(response.body)) {
versionInfo.v1 = response.body;

View File

@ -5,13 +5,11 @@ const fs = require("fs");
const CsiProxyClient = require("../utils/csi_proxy_client").CsiProxyClient;
const k8s = require("@kubernetes/client-node");
const { GrpcError, grpc } = require("../utils/grpc");
const Handlebars = require("handlebars");
const { Mount } = require("../utils/mount");
const { ObjectiveFS } = require("../utils/objectivefs");
const { OneClient } = require("../utils/oneclient");
const { Filesystem } = require("../utils/filesystem");
const { ISCSI } = require("../utils/iscsi");
const { NVMEoF } = require("../utils/nvmeof");
const registry = require("../utils/registry");
const semver = require("semver");
const GeneralUtils = require("../utils/general");
const { Zetabyte } = require("../utils/zfs");
@ -110,7 +108,7 @@ class CsiBaseDriver {
* @returns Filesystem
*/
getDefaultFilesystemInstance() {
return this.ctx.registry.get(
return registry.get(
`${__REGISTRY_NS__}:default_filesystem_instance`,
() => {
return new Filesystem();
@ -124,7 +122,7 @@ class CsiBaseDriver {
* @returns Mount
*/
getDefaultMountInstance() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_mount_instance`, () => {
return registry.get(`${__REGISTRY_NS__}:default_mount_instance`, () => {
const filesystem = this.getDefaultFilesystemInstance();
return new Mount({ filesystem });
});
@ -136,25 +134,13 @@ class CsiBaseDriver {
* @returns ISCSI
*/
getDefaultISCSIInstance() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_iscsi_instance`, () => {
return registry.get(`${__REGISTRY_NS__}:default_iscsi_instance`, () => {
return new ISCSI();
});
}
/**
* Get an instance of the NVMEoF class
*
* @returns NVMEoF
*/
getDefaultNVMEoFInstance() {
const driver = this;
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_nvmeof_instance`, () => {
return new NVMEoF({ logger: driver.ctx.logger });
});
}
getDefaultZetabyteInstance() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_zb_instance`, () => {
return registry.get(`${__REGISTRY_NS__}:default_zb_instance`, () => {
return new Zetabyte({
idempotent: true,
paths: {
@ -176,29 +162,17 @@ class CsiBaseDriver {
}
getDefaultOneClientInstance() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_oneclient_instance`, () => {
return registry.get(`${__REGISTRY_NS__}:default_oneclient_instance`, () => {
return new OneClient();
});
}
getDefaultObjectiveFSInstance() {
const driver = this;
return this.ctx.registry.get(
`${__REGISTRY_NS__}:default_objectivefs_instance`,
() => {
return new ObjectiveFS({
pool: _.get(driver.options, "objectivefs.pool"),
});
}
);
}
/**
*
* @returns CsiProxyClient
*/
getDefaultCsiProxyClientInstance() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_csi_proxy_instance`, () => {
return registry.get(`${__REGISTRY_NS__}:default_csi_proxy_instance`, () => {
const options = {};
options.services = _.get(this.options, "node.csiProxy.services", {});
return new CsiProxyClient(options);
@ -206,7 +180,7 @@ class CsiBaseDriver {
}
getDefaultKubernetsConfigInstance() {
return this.ctx.registry.get(
return registry.get(
`${__REGISTRY_NS__}:default_kubernetes_config_instance`,
() => {
const kc = new k8s.KubeConfig();
@ -378,123 +352,26 @@ class CsiBaseDriver {
* the value of `volume_id` to play nicely with scenarios that do not support
* long names (e.g., smb shares)
*
* per the CSI spec, strings have a max size of 128 bytes; volume_id should NOT
* exceed this limit
*
* Any Unicode string that conforms to the length limit is allowed
* except those containing the following banned characters:
* U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
* (These are control characters other than commonly used whitespace.)
*
* https://github.com/container-storage-interface/spec/blob/master/spec.md#size-limits
* https://docs.oracle.com/cd/E26505_01/html/E37384/gbcpt.html
*
* @param {*} call
* @param {*} name
* @returns
*/
async getVolumeIdFromCall(call) {
async getVolumeIdFromName(name) {
const driver = this;
let volume_id = call.request.name;
if (!volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume name is required`
);
}
const idTemplate = _.get(
driver.options,
"_private.csi.volume.idTemplate",
""
);
if (idTemplate) {
volume_id = Handlebars.compile(idTemplate)({
name: call.request.name,
parameters: call.request.parameters,
});
if (!volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`generated volume_id is empty, idTemplate may be invalid`
);
}
}
const hash_strategy = _.get(
const strategy = _.get(
driver.options,
"_private.csi.volume.idHash.strategy",
""
);
if (hash_strategy) {
switch (hash_strategy.toLowerCase()) {
case "md5":
volume_id = GeneralUtils.md5(volume_id);
break;
case "crc8":
volume_id = GeneralUtils.crc8(volume_id);
break;
case "crc16":
volume_id = GeneralUtils.crc16(volume_id);
break;
case "crc32":
volume_id = GeneralUtils.crc32(volume_id);
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown hash strategy: ${hash_strategy}`
);
}
switch (strategy.toLowerCase()) {
case "md5":
return GeneralUtils.md5(name);
case "crc32":
return GeneralUtils.crc32(name);
case "crc16":
return GeneralUtils.crc16(name);
default:
return name;
}
volume_id = String(volume_id);
if (volume_id.length > 128) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`generated volume_id '${volume_id}' is too large`
);
}
if (volume_id.length < 1) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`generated volume_id '${volume_id}' is too small`
);
}
/**
* technically zfs allows `:` and `.` in addition to `_` and `-`
* TODO: make this more specific to each driver
* in particular, the Nomad per-alloc feature uses names with <name>-[<index>] syntax, so square brackets may be present
* TODO: allow for replacing chars vs absolute failure?
*/
let invalid_chars;
invalid_chars = volume_id.match(/[^a-z0-9_\-]/gi);
if (invalid_chars) {
invalid_chars = String.prototype.concat(
...new Set(invalid_chars.join(""))
);
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`generated volume_id '${volume_id}' contains invalid characters: '${invalid_chars}'`
);
}
/**
* Dataset names must begin with an alphanumeric character.
*/
if (!/^[a-z0-9]/gi.test(volume_id)) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`generated volume_id '${volume_id}' must begin with alphanumeric character`
);
}
return volume_id;
}
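/**
 * Illustrative sketch (name and output are hypothetical): with an
 * idHash.strategy of "crc32" a long CSI name collapses to a short id:
 *
 *   await driver.getVolumeIdFromName("pvc-0a1b2c3d-4e5f-6789-abcd-ef0123456789");
 *   // -> e.g. "2768625541" (short, alphanumeric, well under 128 bytes)
 */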
async GetPluginInfo(call) {
@ -683,7 +560,6 @@ class CsiBaseDriver {
const mount = driver.getDefaultMountInstance();
const filesystem = driver.getDefaultFilesystemInstance();
const iscsi = driver.getDefaultISCSIInstance();
const nvmeof = driver.getDefaultNVMEoFInstance();
let result;
let device;
let block_device_info;
@ -743,7 +619,6 @@ class CsiBaseDriver {
}
switch (node_attach_driver) {
case "objectivefs":
case "oneclient":
// move along
break;
@ -817,11 +692,10 @@ class CsiBaseDriver {
if (!has_guest) {
mount_flags.push("guest");
}
}
// handle node service VOLUME_MOUNT_GROUP
if (volume_mount_group) {
mount_flags.push(`gid=${volume_mount_group}`);
if (volume_mount_group) {
mount_flags.push(`gid=${volume_mount_group}`);
}
}
break;
case "iscsi":
@ -914,18 +788,11 @@ class CsiBaseDriver {
);
}
const sessionParsedPortal = iscsi.parsePortal(session.portal);
// rescan in scenarios when login previously occurred but volumes never appeared
await iscsi.iscsiadm.rescanSession(session);
// find device name
device = await iscsi.devicePathByPortalIQNLUN(
//iscsiConnection.portal,
`${sessionParsedPortal.host}:${sessionParsedPortal.port}`,
iscsiConnection.iqn,
iscsiConnection.lun
);
device = `/dev/disk/by-path/ip-${iscsiConnection.portal}-iscsi-${iscsiConnection.iqn}-lun-${iscsiConnection.lun}`;
let deviceByPath = device;
// can take some time for device to show up, loop for some period
@ -1020,242 +887,6 @@ class CsiBaseDriver {
}
break;
case "nvmeof":
{
let transports = [];
if (volume_context.transport) {
transports.push(volume_context.transport.trim());
}
if (volume_context.transports) {
volume_context.transports.split(",").forEach((transport) => {
transports.push(transport.trim());
});
}
// ensure unique entries only
transports = [...new Set(transports)];
// stores actual device paths after nvmeof login
let nvmeofControllerDevices = [];
let nvmeofNamespaceDevices = [];
// stores configuration of targets/iqn/luns to connect to
let nvmeofConnections = [];
for (let transport of transports) {
nvmeofConnections.push({
transport,
nqn: volume_context.nqn,
nsid: volume_context.nsid,
});
}
for (let nvmeofConnection of nvmeofConnections) {
// connect
try {
await GeneralUtils.retry(15, 2000, async () => {
await nvmeof.connectByNQNTransport(
nvmeofConnection.nqn,
nvmeofConnection.transport
);
});
} catch (err) {
driver.ctx.logger.warn(
`error: ${JSON.stringify(err)} connecting to transport: ${
nvmeofConnection.transport
}`
);
continue;
}
// find controller device
let controllerDevice;
try {
await GeneralUtils.retry(15, 2000, async () => {
controllerDevice =
await nvmeof.controllerDevicePathByTransportNQN(
nvmeofConnection.transport,
nvmeofConnection.nqn,
nvmeofConnection.nsid
);
if (!controllerDevice) {
throw new Error(`failed to find controller device`);
}
});
} catch (err) {
driver.ctx.logger.warn(
`error finding nvme controller device: ${JSON.stringify(
err
)}`
);
continue;
}
// find namespace device
let namespaceDevice;
try {
await GeneralUtils.retry(15, 2000, async () => {
// rescan in scenarios when login previously occurred but volumes never appeared
// must be the NVMe char device, not the namespace device
await nvmeof.rescanNamespace(controllerDevice);
namespaceDevice =
await nvmeof.namespaceDevicePathByTransportNQNNamespace(
nvmeofConnection.transport,
nvmeofConnection.nqn,
nvmeofConnection.nsid
);
if (!namespaceDevice) {
throw new Error(`failed to find namespace device`);
}
});
} catch (err) {
driver.ctx.logger.warn(
`error finding nvme namespace device: ${JSON.stringify(
err
)}`
);
continue;
}
// sanity check for device files
if (!namespaceDevice) {
continue;
}
// sanity check for device files
if (!controllerDevice) {
continue;
}
// can take some time for device to show up, loop for some period
result = await filesystem.pathExists(namespaceDevice);
let timer_start = Math.round(new Date().getTime() / 1000);
let timer_max = 30;
let deviceCreated = result;
while (!result) {
await GeneralUtils.sleep(2000);
result = await filesystem.pathExists(namespaceDevice);
if (result) {
deviceCreated = true;
break;
}
let current_time = Math.round(new Date().getTime() / 1000);
if (!result && current_time - timer_start > timer_max) {
driver.ctx.logger.warn(
`hit timeout waiting for namespace device node to appear: ${namespaceDevice}`
);
break;
}
}
if (deviceCreated) {
device = await filesystem.realpath(namespaceDevice);
nvmeofControllerDevices.push(controllerDevice);
nvmeofNamespaceDevices.push(namespaceDevice);
driver.ctx.logger.info(
`successfully logged into nvmeof transport ${nvmeofConnection.transport} and created controller device: ${controllerDevice}, namespace device: ${namespaceDevice}`
);
}
}
// let things settle
// this will help in dm scenarios
await GeneralUtils.sleep(2000);
// filter duplicates
nvmeofNamespaceDevices = nvmeofNamespaceDevices.filter(
(value, index, self) => {
return self.indexOf(value) === index;
}
);
nvmeofControllerDevices = nvmeofControllerDevices.filter(
(value, index, self) => {
return self.indexOf(value) === index;
}
);
// only throw an error if we were not able to attach to *any* devices
if (nvmeofNamespaceDevices.length < 1) {
throw new GrpcError(
grpc.status.UNKNOWN,
`unable to attach any nvme devices`
);
}
if (nvmeofControllerDevices.length != nvmeofConnections.length) {
driver.ctx.logger.warn(
`failed to attach all nvmeof devices/subsystems/transports`
);
// TODO: allow a parameter to control this behavior in some form
if (false) {
throw new GrpcError(
grpc.status.UNKNOWN,
`unable to attach all nvmeof devices`
);
}
}
/**
* NVMEoF has native multipath capabilities without using device mapper
* You can disable the built-in using kernel param nvme_core.multipath=N/Y
*/
let useNativeMultipath = await nvmeof.nativeMultipathEnabled();
if (useNativeMultipath) {
// with native multipath enabled there should be exactly one namespace device
if (nvmeofNamespaceDevices.length > 1) {
throw new GrpcError(
grpc.status.UNKNOWN,
`too many nvme namespace devices, native multipath is enabled so there should be only 1`
);
}
} else {
// compare all device-mapper slaves with the newly created devices
// if any of the new devices are device-mapper slaves treat this as a
// multipath scenario
let allDeviceMapperSlaves =
await filesystem.getAllDeviceMapperSlaveDevices();
let commonDevices = allDeviceMapperSlaves.filter((value) =>
nvmeofNamespaceDevices.includes(value)
);
const useDMMultipath =
nvmeofConnections.length > 1 || commonDevices.length > 0;
// discover multipath device to use
if (useDMMultipath) {
device = await filesystem.getDeviceMapperDeviceFromSlaves(
nvmeofNamespaceDevices,
false
);
if (!device) {
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to discover multipath device`
);
}
} else {
// without DM multipath there should be exactly one namespace device
if (nvmeofNamespaceDevices.length > 1) {
throw new GrpcError(
grpc.status.UNKNOWN,
`too many nvme namespace devices, neither DM nor native multipath enabled`
);
}
}
}
}
break;
case "hostpath":
result = await mount.pathIsMounted(staging_target_path);
// if not mounted, mount
@ -1266,79 +897,6 @@ class CsiBaseDriver {
return {};
}
break;
case "objectivefs":
let objectivefs = driver.getDefaultObjectiveFSInstance();
let ofs_filesystem = volume_context.filesystem;
let env = {};
for (const key in normalizedSecrets) {
if (key.startsWith("env.")) {
env[key.substr("env.".length)] = normalizedSecrets[key];
}
}
for (const key in volume_context) {
if (key.startsWith("env.")) {
env[key.substr("env.".length)] = volume_context[key];
}
}
if (!ofs_filesystem) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`missing ofs volume filesystem`
);
}
let ofs_object_store = env["OBJECTSTORE"];
if (!ofs_object_store) {
ofs_object_store = await objectivefs.getObjectStoreFromFilesystem(
ofs_filesystem
);
if (ofs_object_store) {
env["OBJECTSTORE"] = ofs_object_store;
}
}
if (!ofs_object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`missing required ofs volume env.OBJECTSTORE`
);
}
// normalize fs to not include objectstore
ofs_filesystem = await objectivefs.stripObjectStoreFromFilesystem(
ofs_filesystem
);
device = `${ofs_object_store}${ofs_filesystem}`;
result = await mount.deviceIsMountedAtPath(
device,
staging_target_path
);
if (result) {
return {};
}
result = await objectivefs.mount(
env,
ofs_filesystem,
staging_target_path,
mount_flags
);
if (result) {
return {};
}
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to mount objectivefs: ${device}`
);
break;
case "oneclient":
let oneclient = driver.getDefaultOneClientInstance();
@ -1431,7 +989,6 @@ class CsiBaseDriver {
let is_block = false;
switch (node_attach_driver) {
case "iscsi":
case "nvmeof":
is_block = true;
break;
case "zfs-local":
@ -1496,16 +1053,6 @@ class CsiBaseDriver {
if (!Array.isArray(formatOptions)) {
formatOptions = [];
}
switch (fs_type) {
case "ext3":
case "ext4":
case "ext4dev":
// disable reserved blocks in this scenario
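// (mke2fs defaults to reserving 5% of blocks for root, which is wasted on a dedicated volume)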
formatOptions.unshift("-m", "0");
break;
}
await filesystem.formatDevice(device, fs_type, formatOptions);
}
@ -1546,7 +1093,6 @@ class CsiBaseDriver {
fs_type = "cifs";
break;
case "iscsi":
case "nvmeof":
fs_type = "ext4";
break;
default:
@ -2442,7 +1988,6 @@ class CsiBaseDriver {
const mount = driver.getDefaultMountInstance();
const filesystem = driver.getDefaultFilesystemInstance();
const iscsi = driver.getDefaultISCSIInstance();
const nvmeof = driver.getDefaultNVMEoFInstance();
let result;
let is_block = false;
let is_device_mapper = false;
@ -2556,7 +2101,6 @@ class CsiBaseDriver {
}
if (is_block) {
let breakdeviceloop = false;
let realBlockDeviceInfos = [];
// detect if is a multipath device
is_device_mapper = await filesystem.isDeviceMapperDevice(
@ -2578,127 +2122,94 @@ class CsiBaseDriver {
// TODO: this could be made async to detach all simultaneously
for (const block_device_info_i of realBlockDeviceInfos) {
if (breakdeviceloop) {
break;
}
switch (block_device_info_i.tran) {
case "iscsi":
{
if (
await filesystem.deviceIsIscsi(block_device_info_i.path)
) {
let parent_block_device =
await filesystem.getBlockDeviceParent(
block_device_info_i.path
);
if (await filesystem.deviceIsIscsi(block_device_info_i.path)) {
let parent_block_device = await filesystem.getBlockDeviceParent(
block_device_info_i.path
);
// figure out which iscsi session this belongs to and logout
// scan /dev/disk/by-path/ip-*?
// device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`;
// parse output from `iscsiadm -m session -P 3`
let sessions = await iscsi.iscsiadm.getSessionsDetails();
for (let i = 0; i < sessions.length; i++) {
let session = sessions[i];
let is_attached_to_session = false;
// figure out which iscsi session this belongs to and logout
// scan /dev/disk/by-path/ip-*?
// device = `/dev/disk/by-path/ip-${volume_context.portal}-iscsi-${volume_context.iqn}-lun-${volume_context.lun}`;
// parse output from `iscsiadm -m session -P 3`
let sessions = await iscsi.iscsiadm.getSessionsDetails();
for (let i = 0; i < sessions.length; i++) {
let session = sessions[i];
let is_attached_to_session = false;
if (
session.attached_scsi_devices &&
session.attached_scsi_devices.host &&
session.attached_scsi_devices.host.devices
) {
is_attached_to_session =
session.attached_scsi_devices.host.devices.some(
(device) => {
if (
device.attached_scsi_disk ==
parent_block_device.name
) {
return true;
}
return false;
}
);
}
if (is_attached_to_session) {
let timer_start;
let timer_max;
timer_start = Math.round(new Date().getTime() / 1000);
timer_max = 30;
let loggedOut = false;
while (!loggedOut) {
try {
await iscsi.iscsiadm.logout(session.target, [
session.persistent_portal,
]);
loggedOut = true;
} catch (err) {
await GeneralUtils.sleep(2000);
let current_time = Math.round(
new Date().getTime() / 1000
);
if (current_time - timer_start > timer_max) {
// not throwing error for now as future invocations would not enter code path anyhow
loggedOut = true;
//throw new GrpcError(
// grpc.status.UNKNOWN,
// `hit timeout trying to logout of iscsi target: ${session.persistent_portal}`
//);
}
}
}
timer_start = Math.round(new Date().getTime() / 1000);
timer_max = 30;
let deletedEntry = false;
while (!deletedEntry) {
try {
await iscsi.iscsiadm.deleteNodeDBEntry(
session.target,
session.persistent_portal
);
deletedEntry = true;
} catch (err) {
await GeneralUtils.sleep(2000);
let current_time = Math.round(
new Date().getTime() / 1000
);
if (current_time - timer_start > timer_max) {
// not throwing error for now as future invocations would not enter code path anyhow
deletedEntry = true;
//throw new GrpcError(
// grpc.status.UNKNOWN,
// `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}`
//);
}
}
if (
session.attached_scsi_devices &&
session.attached_scsi_devices.host &&
session.attached_scsi_devices.host.devices
) {
is_attached_to_session =
session.attached_scsi_devices.host.devices.some(
(device) => {
if (
device.attached_scsi_disk == parent_block_device.name
) {
return true;
}
return false;
}
}
}
}
break;
case "nvme":
{
if (
await filesystem.deviceIsNVMEoF(block_device_info_i.path)
) {
let nqn = await nvmeof.nqnByNamespaceDeviceName(
block_device_info_i.name
);
if (nqn) {
await nvmeof.disconnectByNQN(nqn);
/**
* the above disconnects *all* devices with the nqn so we
* do NOT want to keep iterating all the 'real' devices
* in the case of DM multipath
*/
breakdeviceloop = true;
}
if (is_attached_to_session) {
let timer_start;
let timer_max;
timer_start = Math.round(new Date().getTime() / 1000);
timer_max = 30;
let loggedOut = false;
while (!loggedOut) {
try {
await iscsi.iscsiadm.logout(session.target, [
session.persistent_portal,
]);
loggedOut = true;
} catch (err) {
await GeneralUtils.sleep(2000);
let current_time = Math.round(
new Date().getTime() / 1000
);
if (current_time - timer_start > timer_max) {
// not throwing error for now as future invocations would not enter code path anyhow
loggedOut = true;
//throw new GrpcError(
// grpc.status.UNKNOWN,
// `hit timeout trying to logout of iscsi target: ${session.persistent_portal}`
//);
}
}
}
timer_start = Math.round(new Date().getTime() / 1000);
timer_max = 30;
let deletedEntry = false;
while (!deletedEntry) {
try {
await iscsi.iscsiadm.deleteNodeDBEntry(
session.target,
session.persistent_portal
);
deletedEntry = true;
} catch (err) {
await GeneralUtils.sleep(2000);
let current_time = Math.round(
new Date().getTime() / 1000
);
if (current_time - timer_start > timer_max) {
// not throwing error for now as future invocations would not enter code path anyhow
deletedEntry = true;
//throw new GrpcError(
// grpc.status.UNKNOWN,
// `hit timeout trying to delete iscsi node DB entry: ${session.target}, ${session.persistent_portal}`
//);
}
}
}
}
break;
}
}
}
}
@ -3025,11 +2536,9 @@ class CsiBaseDriver {
case "nfs":
case "smb":
case "lustre":
case "objectivefs":
case "oneclient":
case "hostpath":
case "iscsi":
case "nvmeof":
case "zfs-local":
// ensure appropriate directories/files
switch (access_type) {
@ -3696,8 +3205,6 @@ class CsiBaseDriver {
const driver = this;
const mount = driver.getDefaultMountInstance();
const filesystem = driver.getDefaultFilesystemInstance();
const nvmeof = driver.getDefaultNVMEoFInstance();
let device;
let fs_info;
let device_path;
@ -3760,14 +3267,6 @@ class CsiBaseDriver {
rescan_devices.push(device);
for (let sdevice of rescan_devices) {
let is_nvmeof = await filesystem.deviceIsNVMEoF(sdevice);
if (is_nvmeof) {
let controllers =
await nvmeof.getControllersByNamespaceDeviceName(sdevice);
for (let controller of controllers) {
await nvmeof.rescanNamespace(`/dev/${controller.Controller}`);
}
}
// TODO: technically rescan is only relevant/available for remote drives
// such as iscsi etc, should probably limit this call as appropriate
// for now crudely checking the scenario inside the method itself

View File

@ -121,10 +121,6 @@ class NodeManualDriver extends CsiBaseDriver {
driverResourceType = "filesystem";
fs_types = ["lustre"];
break;
case "objectivefs":
driverResourceType = "filesystem";
fs_types = ["objectivefs", "fuse.objectivefs"];
break;
case "oneclient":
driverResourceType = "filesystem";
fs_types = ["oneclient", "fuse.oneclient"];
@ -133,7 +129,6 @@ class NodeManualDriver extends CsiBaseDriver {
driverResourceType = "filesystem";
break;
case "iscsi":
case "nvmeof":
driverResourceType = "volume";
fs_types = ["btrfs", "ext3", "ext4", "ext4dev", "xfs"];
break;
@ -169,14 +164,6 @@ class NodeManualDriver extends CsiBaseDriver {
"MULTI_NODE_MULTI_WRITER",
];
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
@ -208,14 +195,6 @@ class NodeManualDriver extends CsiBaseDriver {
"MULTI_NODE_SINGLE_WRITER",
];
}
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
if (capability.access_type == "mount") {
if (
capability.mount.fs_type &&

View File

@ -2,8 +2,9 @@ const fs = require("fs");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const { Filesystem } = require("../../utils/filesystem");
const registry = require("../../utils/registry");
const semver = require("semver");
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
const SshClient = require("../../utils/ssh").SshClient;
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
// zfs common properties
@ -124,7 +125,7 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
}
getSshClient() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:ssh_client`, () => {
return registry.get(`${__REGISTRY_NS__}:ssh_client`, () => {
return new SshClient({
logger: this.ctx.logger,
connection: this.options.sshConnection,
@ -133,7 +134,7 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
}
getZetabyte() {
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
return registry.get(`${__REGISTRY_NS__}:zb`, () => {
let sshClient;
let executor;
if (this.options.sshConnection) {

View File

@ -500,35 +500,8 @@ class Filesystem {
let result;
try {
/**
* lsblk
* blkid
*/
const strategy =
process.env.FILESYSTEM_TYPE_DETECTION_STRATEGY || "lsblk";
switch (strategy) {
// requires udev data to be present, otherwise the fstype property is always null (the call itself still succeeds)
case "lsblk":
result = await filesystem.getBlockDevice(device);
return result.fstype ? true : false;
// no requirement on udev data to be present
case "blkid":
try {
result = await filesystem.getDeviceFilesystemInfo(device);
} catch (err) {
// if not formatted nor partitioned exits with 2
if (err.code == 2) {
return false;
}
throw err;
}
return result.type ? true : false;
// file -s <device> could also be an option
default:
throw new Error(`unknown filesystem detection strategy: ${strategy}`);
}
result = await filesystem.getBlockDevice(device);
return result.fstype ? true : false;
} catch (err) {
throw err;
}
@ -548,21 +521,6 @@ class Filesystem {
return result && result.tran == "iscsi";
}
async deviceIsNVMEoF(device) {
const filesystem = this;
let result;
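// walk up the pkname (parent kernel name) chain to the top-level block device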
do {
if (result) {
device = `/dev/${result.pkname}`;
}
result = await filesystem.getBlockDevice(device);
} while (result.pkname);
// TODO: add further logic here to ensure the device is not a local pcie/etc device
return result && result.tran == "nvme";
}
async getBlockDeviceParent(device) {
const filesystem = this;
let result;

View File

@ -1,33 +1,6 @@
const _ = require("lodash");
const axios = require("axios");
const crypto = require("crypto");
const dns = require("dns");
const crc = require("crc");
axios.interceptors.request.use(
function (config) {
config.metadata = { startTime: new Date() };
return config;
},
function (error) {
return Promise.reject(error);
}
);
axios.interceptors.response.use(
function (response) {
response.config.metadata.endTime = new Date();
response.duration =
response.config.metadata.endTime - response.config.metadata.startTime;
return response;
},
function (error) {
error.config.metadata.endTime = new Date();
error.duration =
error.config.metadata.endTime - error.config.metadata.startTime;
return Promise.reject(error);
}
);
function sleep(ms) {
return new Promise((resolve) => {
@ -35,31 +8,62 @@ function sleep(ms) {
});
}
function trimchar(str, ch) {
var start = 0,
end = str.length;
while (start < end && str[start] === ch) ++start;
while (end > start && str[end - 1] === ch) --end;
return start > 0 || end < str.length ? str.substring(start, end) : str;
}
function md5(val) {
return crypto.createHash("md5").update(val).digest("hex");
}
function crc8(data) {
return crc.crc8(data);
function crc32(val) {
for (var a, o = [], c = 0; c < 256; c++) {
a = c;
for (var f = 0; f < 8; f++) a = 1 & a ? 3988292384 ^ (a >>> 1) : a >>> 1;
o[c] = a;
}
for (var n = -1, t = 0; t < val.length; t++)
n = (n >>> 8) ^ o[255 & (n ^ val.charCodeAt(t))];
return (-1 ^ n) >>> 0;
}
const crctab16 = new Uint16Array([
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, 0x8c48,
0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, 0x1081, 0x0108,
0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, 0x9cc9, 0x8d40, 0xbfdb,
0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, 0x2102, 0x308b, 0x0210, 0x1399,
0x6726, 0x76af, 0x4434, 0x55bd, 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e,
0xfae7, 0xc87c, 0xd9f5, 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e,
0x54b5, 0x453c, 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd,
0xc974, 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, 0x5285,
0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, 0xdecd, 0xcf44,
0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, 0x6306, 0x728f, 0x4014,
0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, 0xef4e, 0xfec7, 0xcc5c, 0xddd5,
0xa96a, 0xb8e3, 0x8a78, 0x9bf1, 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3,
0x242a, 0x16b1, 0x0738, 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862,
0x9af9, 0x8b70, 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e,
0xf0b7, 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, 0x18c1,
0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, 0xa50a, 0xb483,
0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, 0x2942, 0x38cb, 0x0a50,
0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, 0xb58b, 0xa402, 0x9699, 0x8710,
0xf3af, 0xe226, 0xd0bd, 0xc134, 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7,
0x6e6e, 0x5cf5, 0x4d7c, 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1,
0xa33a, 0xb2b3, 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72,
0x3efb, 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, 0xe70e,
0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, 0x6b46, 0x7acf,
0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, 0xf78f, 0xe606, 0xd49d,
0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, 0x7bc7, 0x6a4e, 0x58d5, 0x495c,
0x3de3, 0x2c6a, 0x1ef1, 0x0f78,
]);
// calculate the 16-bit CRC of data with predetermined length.
function crc16(data) {
return crc.crc16(data);
}
var res = 0x0ffff;
function crc32(data) {
return crc.crc32(data);
for (let b of data) {
res = ((res >> 8) & 0x0ff) ^ crctab16[(res ^ b) & 0xff];
}
return ~res & 0x0ffff;
}
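/**
 * Hedged note: the table-driven variant is byte-oriented, so a Buffer is the
 * safe input (string characters coerce oddly in the bitwise ops):
 *
 *   crc16(Buffer.from("tank/foo/bar")); // 16-bit integer checksum
 */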
function lockKeysFromRequest(call, serviceMethodName) {
@ -83,10 +87,10 @@ function lockKeysFromRequest(call, serviceMethodName) {
case "NodeUnstageVolume":
case "NodePublishVolume":
case "NodeUnpublishVolume":
case "NodeGetVolumeStats":
case "NodeExpandVolume":
return ["volume_id_" + call.request.volume_id];
case "NodeGetVolumeStats":
default:
return [];
}
@ -181,20 +185,6 @@ function stringify(value) {
return JSON.stringify(value, getCircularReplacer());
}
function before_string(target, search) {
if (!target.includes(search)) {
return "";
}
return target.substring(0, target.indexOf(search));
}
function after_string(target, search) {
if (!target.includes(search)) {
return "";
}
return target.substring(target.indexOf(search) + search.length);
}
function default_supported_block_filesystems() {
return ["btrfs", "exfat", "ext3", "ext4", "ext4dev", "ntfs", "vfat", "xfs"];
}
@ -223,7 +213,6 @@ async function retry(retries, retriesDelay, code, options = {}) {
let retry = retryCondition(err);
if (!retry) {
console.log(`retry - failed condition, not trying again`);
//console.log(code.toString(), retryCondition.toString());
throw err;
}
}
@ -260,28 +249,13 @@ async function retry(retries, retriesDelay, code, options = {}) {
} while (true);
}
async function hostname_lookup(hostname) {
return new Promise((resolve, reject) => {
dns.lookup(hostname, function (err, result) {
if (err) {
return reject(err);
}
return resolve(result);
});
});
}
module.exports.sleep = sleep;
module.exports.md5 = md5;
module.exports.crc32 = crc32;
module.exports.crc16 = crc16;
module.exports.crc8 = crc8;
module.exports.lockKeysFromRequest = lockKeysFromRequest;
module.exports.getLargestNumber = getLargestNumber;
module.exports.stringify = stringify;
module.exports.before_string = before_string;
module.exports.after_string = after_string;
module.exports.stripWindowsDriveLetter = stripWindowsDriveLetter;
module.exports.hasWindowsDriveLetter = hasWindowsDriveLetter;
module.exports.axios_request = axios_request;
@ -290,5 +264,3 @@ module.exports.default_supported_block_filesystems =
module.exports.default_supported_file_filesystems =
default_supported_file_filesystems;
module.exports.retry = retry;
module.exports.trimchar = trimchar;
module.exports.hostname_lookup = hostname_lookup;

View File

@ -1,6 +1,5 @@
const cp = require("child_process");
const { hostname_lookup, sleep } = require("./general");
const net = require("net");
const { sleep } = require("./general");
function getIscsiValue(value) {
if (value == "<empty>") return null;
@ -179,45 +178,9 @@ class ISCSI {
async getSession(tgtIQN, portal) {
const sessions = await iscsi.iscsiadm.getSessions();
let parsedPortal = iscsi.parsePortal(portal);
let parsedPortalHostIP = "";
if (parsedPortal.host) {
// if host is not an ip address
let parsedPortalHost = parsedPortal.host
.replaceAll("[", "")
.replaceAll("]", "");
if (net.isIP(parsedPortalHost) == 0) {
// ipv6 response is without []
try {
parsedPortalHostIP =
(await hostname_lookup(parsedPortal.host)) || "";
} catch (err) {
console.log(
`failed to lookup hostname: host - ${parsedPortal.host}, error - ${err}`
);
}
}
}
// set invalid hostname/ip string to ensure empty values do not errantly pass
if (!parsedPortalHostIP) {
parsedPortalHostIP = "--------------------------------------";
}
let session = false;
sessions.every((i_session) => {
// [2a10:4741:36:28:e61d:2dff:fe90:80fe]:3260
// i_session.portal includes [] for ipv6
if (
`${i_session.iqn}` == tgtIQN &&
(portal == i_session.portal ||
`${parsedPortal.host}:${parsedPortal.port}` == i_session.portal ||
`${parsedPortalHostIP}:${parsedPortal.port}` ==
i_session.portal ||
`[${parsedPortal.host}]:${parsedPortal.port}` ==
i_session.portal ||
`[${parsedPortalHostIP}]:${parsedPortal.port}` ==
i_session.portal)
) {
if (`${i_session.iqn}` == tgtIQN && portal == i_session.portal) {
session = i_session;
return false;
}
@ -252,19 +215,10 @@ class ISCSI {
// protocol: [id] ip:port,target_portal_group_tag targetname
// tcp: [111] [2001:123:456::1]:3260,1 iqn.2005-10.org.freenas.ctl:default-aptcacher-iscsi-claim (non-flash)
// tcp: [111] [hostname]:3260,1 iqn.2005-10.org.freenas.ctl:default-aptcacher-iscsi-claim (non-flash)
let data;
data = result.stdout;
if (!data) {
data = "";
}
const entries = data.trim().split("\n");
const entries = result.stdout.trim().split("\n");
const sessions = [];
let fields;
entries.forEach((entry) => {
if (!entry) {
return;
}
fields = entry.split(" ");
sessions.push({
protocol: entry.split(":")[0],
@ -562,45 +516,6 @@ class ISCSI {
};
}
parsePortal(portal) {
portal = portal.trim();
let host = null;
let port = null;
// ipv6
if (portal.startsWith("[")) {
host = portal.substr(0, portal.indexOf("]") + 1);
port = portal.substr(portal.indexOf("]") + 2);
} else {
const lastIndex = portal.lastIndexOf(":");
if (lastIndex !== -1) {
host = portal.slice(0, lastIndex);
port = portal.slice(lastIndex + 1);
} else {
host = portal;
}
}
if (!port) {
port = 3260;
}
return {
host,
port: parseInt(port),
};
}
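/**
 * Illustrative (hosts are hypothetical):
 *
 *   iscsi.parsePortal("192.168.1.50:3260");  // { host: "192.168.1.50", port: 3260 }
 *   iscsi.parsePortal("[2001:db8::1]:3260"); // { host: "[2001:db8::1]", port: 3260 }
 *   iscsi.parsePortal("my-nas");             // { host: "my-nas", port: 3260 } (default port)
 */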
async devicePathByPortalIQNLUN(portal, iqn, lun, options = {}) {
const parsedPortal = this.parsePortal(portal);
let portalHost = parsedPortal.host.replaceAll("[", "").replaceAll("]", "");
if (options.hostname_lookup && net.isIP(portalHost) == 0) {
portalHost = (await hostname_lookup(portalHost)) || portalHost;
}
return `/dev/disk/by-path/ip-${portalHost}:${parsedPortal.port}-iscsi-${iqn}-lun-${lun}`;
}
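/**
 * Illustrative (portal and iqn are hypothetical):
 *
 *   await iscsi.devicePathByPortalIQNLUN(
 *     "192.168.1.50:3260",
 *     "iqn.2005-10.org.freenas.ctl:foo",
 *     0
 *   );
 *   // -> "/dev/disk/by-path/ip-192.168.1.50:3260-iscsi-iqn.2005-10.org.freenas.ctl:foo-lun-0"
 */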
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;

View File

@ -1,349 +0,0 @@
const _ = require("lodash");
const cp = require("child_process");
const uuidv4 = require("uuid").v4;
const DEFAULT_TIMEOUT = process.env.KOPIA_DEFAULT_TIMEOUT || 90000;
/**
* https://kopia.io/
*/
class Kopia {
constructor(options = {}) {
const kopia = this;
kopia.options = options;
kopia.client_instance_uuid = uuidv4();
options.paths = options.paths || {};
if (!options.paths.kopia) {
options.paths.kopia = "kopia";
}
if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}
if (!options.env) {
options.env = {};
}
options.env[
"KOPIA_CONFIG_PATH"
] = `/tmp/kopia/${kopia.client_instance_uuid}/repository.config`;
options.env["KOPIA_CHECK_FOR_UPDATES"] = "false";
options.env[
"KOPIA_CACHE_DIRECTORY"
] = `/tmp/kopia/${kopia.client_instance_uuid}/cache`;
options.env[
"KOPIA_LOG_DIR"
] = `/tmp/kopia/${kopia.client_instance_uuid}/log`;
if (!options.executor) {
options.executor = {
spawn: cp.spawn,
};
}
if (!options.logger) {
options.logger = console;
}
options.logger.info(
`kopia client instantiated with client_instance_uuid: ${kopia.client_instance_uuid}`
);
if (!options.global_flags) {
options.global_flags = [];
}
}
/**
* kopia repository connect
*
* https://kopia.io/docs/reference/command-line/common/repository-connect-from-config/
*
* --override-hostname
* --override-username
*
* @param {*} options
*/
async repositoryConnect(options = []) {
const kopia = this;
let args = ["repository", "connect"];
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
try {
await kopia.exec(kopia.options.paths.kopia, args);
return;
} catch (err) {
throw err;
}
}
/**
* kopia repository status
*
* @param {*} options
*/
async repositoryStatus(options = []) {
const kopia = this;
let args = ["repository", "status", "--json"];
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args);
return result;
} catch (err) {
throw err;
}
}
/**
* kopia snapshot list
*
* @param {*} options
*/
async snapshotList(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "list", "--json"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-list",
});
return result.parsed;
} catch (err) {
throw err;
}
}
/**
* kopia snapshot list
*
* @param {*} snapshot_id
*/
async snapshotGet(snapshot_id) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "list", "--json", "--all"]);
args = args.concat(kopia.options.global_flags);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-list",
});
return result.parsed.find((item) => {
return item.id == snapshot_id;
});
} catch (err) {
throw err;
}
}
/**
* kopia snapshot create
*
* @param {*} options
*/
async snapshotCreate(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "create", "--json"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-create",
});
return result.parsed;
} catch (err) {
throw err;
}
}
/**
* kopia snapshot delete <id>
*
* @param {*} options
*/
async snapshotDelete(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "delete", "--delete"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-delete",
});
return result;
} catch (err) {
if (
err.code == 1 &&
(err.stderr.includes("no snapshots matched") ||
err.stderr.includes("invalid content hash"))
) {
return;
}
throw err;
}
}
/**
* kopia snapshot restore <snapshot_id[/sub/path]> /path/to/restore/to
*
* @param {*} options
*/
async snapshotRestore(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "restore"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-restore",
});
return result;
} catch (err) {
if (
err.code == 1 &&
(err.stderr.includes("no snapshots matched") ||
err.stderr.includes("invalid content hash"))
) {
return;
}
throw err;
}
}
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}
const kopia = this;
args = args || [];
if (kopia.options.sudo) {
args.unshift(command);
command = kopia.options.paths.sudo;
}
options.env = {
...{},
...process.env,
...kopia.options.env,
...options.env,
};
let tokenIndex = args.findIndex((value) => {
return value.trim() == "--token";
});
let cleansedArgs = [...args];
if (tokenIndex >= 0) {
cleansedArgs[tokenIndex + 1] = "redacted";
}
const cleansedLog = `${command} ${cleansedArgs.join(" ")}`;
console.log("executing kopia command: %s", cleansedLog);
return new Promise((resolve, reject) => {
let stdin;
if (options.stdin) {
stdin = options.stdin;
delete options.stdin;
}
const child = kopia.options.executor.spawn(command, args, options);
if (stdin) {
child.stdin.write(stdin);
}
let stdout = "";
let stderr = "";
const log_progress_output = _.debounce(
(data) => {
const lines = data.split("\n");
/**
* get last line, remove spinner, etc
*/
const line = lines
.slice(-1)[0]
.trim()
.replace(/^[\/\\\-\|] /gi, "");
kopia.options.logger.info(
`kopia ${options.operation} progress: ${line.trim()}`
);
},
250,
{ leading: true, trailing: true, maxWait: 5000 }
);
child.stdout.on("data", function (data) {
data = String(data);
stdout += data;
});
child.stderr.on("data", function (data) {
data = String(data);
stderr += data;
switch (options.operation) {
case "snapshot-create":
log_progress_output(data);
break;
default:
break;
}
});
child.on("close", function (code) {
const result = { code, stdout, stderr, timeout: false };
if (!result.parsed) {
try {
result.parsed = JSON.parse(result.stdout);
} catch (err) {}
}
// timeout scenario
if (code === null) {
result.timeout = true;
reject(result);
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}
module.exports.Kopia = Kopia;
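A hedged usage sketch (the token env var and flags below are assumptions, not part of this diff):
const { Kopia } = require("./src/utils/kopia");
async function example() {
  const kopia = new Kopia({});
  // connect with a repository token (the exec wrapper above redacts --token values in its logs)
  await kopia.repositoryConnect([
    "from-config",
    "--token",
    process.env.KOPIA_REPOSITORY_TOKEN,
  ]);
  const snapshots = await kopia.snapshotList();
  console.log(snapshots);
}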

View File

@ -1,579 +0,0 @@
const cp = require("child_process");
const { hostname_lookup, trimchar } = require("./general");
const URI = require("uri-js");
const querystring = require("querystring");
const DEFAULT_TIMEOUT = process.env.NVMEOF_DEFAULT_TIMEOUT || 30000;
class NVMEoF {
constructor(options = {}) {
const nvmeof = this;
nvmeof.options = options;
options.paths = options.paths || {};
if (!options.paths.nvme) {
options.paths.nvme = "nvme";
}
if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.executor) {
options.executor = {
spawn: cp.spawn,
};
}
if (nvmeof.options.logger) {
nvmeof.logger = nvmeof.options.logger;
} else {
nvmeof.logger = console;
console.verbose = function () {
console.log(...arguments);
};
}
}
/**
* List all NVMe devices and namespaces on machine
*
* @param {*} args
*/
async list(args = []) {
const nvmeof = this;
args.unshift("list", "-o", "json");
let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
return result.parsed;
}
/**
* List nvme subsystems
*
* @param {*} args
*/
async listSubsys(args = []) {
const nvmeof = this;
args.unshift("list-subsys", "-o", "json");
let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
return result.parsed;
}
/**
* Discover NVMeoF subsystems
*
* @param {*} transport
* @param {*} args
* @returns
*/
async discover(transport, args = []) {
const nvmeof = this;
transport = await nvmeof.parseTransport(transport);
let transport_args = [];
if (transport.type) {
transport_args.push("--transport", transport.type);
}
if (transport.address) {
transport_args.push("--traddr", transport.address);
}
if (transport.service) {
transport_args.push("--trsvcid", transport.service);
}
args.unshift("discover", "-o", "json", ...transport_args);
let result = await nvmeof.exec(nvmeof.options.paths.nvme, args);
return result.parsed;
}
/**
* Connect to NVMeoF subsystem
*
* @param {*} args
*/
async connectByNQNTransport(nqn, transport, args = []) {
const nvmeof = this;
transport = await nvmeof.parseTransport(transport);
let transport_args = [];
if (transport.type) {
transport_args.push("--transport", transport.type);
}
if (transport.address) {
transport_args.push("--traddr", transport.address);
}
if (transport.service) {
transport_args.push("--trsvcid", transport.service);
}
if (transport.args) {
for (let arg in transport.args) {
let value = transport.args[arg];
if (!arg.startsWith("-")) {
arg = `--${arg}`;
}
transport_args.push(arg, value);
}
}
args.unshift("connect", "--nqn", nqn, ...transport_args);
try {
await nvmeof.exec(nvmeof.options.paths.nvme, args);
} catch (err) {
if (
err.stderr &&
(err.stderr.includes("already connected") ||
err.stderr.includes("Operation already in progress"))
) {
// idempotent
} else {
throw err;
}
}
}
/**
* Disconnect from NVMeoF subsystem
*
* @param {*} args
*/
async disconnectByNQN(nqn, args = []) {
const nvmeof = this;
args.unshift("disconnect", "--nqn", nqn);
await nvmeof.exec(nvmeof.options.paths.nvme, args);
}
/**
* Disconnect from NVMeoF subsystem
*
* @param {*} args
*/
async disconnectByDevice(device, args = []) {
const nvmeof = this;
args.unshift("disconnect", "--device", device);
await nvmeof.exec(nvmeof.options.paths.nvme, args);
}
/**
* Rescans the NVME namespaces
*
* @param {*} device
* @param {*} args
*/
async rescanNamespace(device, args = []) {
const nvmeof = this;
args.unshift("ns-rescan", device);
await nvmeof.exec(nvmeof.options.paths.nvme, args);
}
async deviceIsNamespaceDevice(device) {
const nvmeof = this;
device = device.replace("/dev/", "");
const subsystems = await nvmeof.getSubsystems();
for (let subsystem of subsystems) {
// check subsystem namespaces
if (subsystem.Namespaces) {
for (let namespace of subsystem.Namespaces) {
if (namespace.NameSpace == device) {
return true;
}
}
}
// check controller namespaces
if (subsystem.Controllers) {
for (let controller of subsystem.Controllers) {
if (controller.Namespaces) {
for (let namespace of controller.Namespaces) {
if (namespace.NameSpace == device) {
return true;
}
}
}
}
}
}
return false;
}
async deviceIsControllerDevice(device) {
const nvmeof = this;
device = device.replace("/dev/", "");
const subsystems = await nvmeof.getSubsystems();
for (let subsystem of subsystems) {
if (subsystem.Controllers) {
for (let controller of subsystem.Controllers) {
if (controller.Controller == device) {
return true;
}
}
}
}
return false;
}
async parseTransport(transport) {
if (typeof transport === "object") {
return transport;
}
transport = transport.trim();
const parsed = URI.parse(transport);
let args = querystring.parse(parsed.query);
let type = parsed.scheme;
let address = parsed.host;
let service;
switch (parsed.scheme) {
case "fc":
case "rdma":
case "tcp":
type = parsed.scheme;
break;
default:
throw new Error(`unknown nvme transport type: ${parsed.scheme}`);
}
switch (type) {
case "fc":
address = trimchar(address, "[");
address = trimchar(address, "]");
break;
case "tcp":
/**
* kernel stores value as ip, so if address passed as hostname then
* translate to ip address
*
* TODO: this could be brittle
*/
let lookup = await hostname_lookup(address);
if (lookup) {
address = lookup;
}
break;
}
switch (type) {
case "rdma":
case "tcp":
service = parsed.port;
if (!service) {
service = 4420;
}
break;
}
return {
type,
address,
service,
args,
};
}
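/**
 * Illustrative (address and port are hypothetical):
 *
 *   await nvmeof.parseTransport("tcp://192.168.1.50:4420");
 *   // -> { type: "tcp", address: "192.168.1.50", service: 4420, args: {} }
 *
 * for tcp, hostnames are resolved to an IP to match what the kernel stores
 */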
async pathExists(path) {
const nvmeof = this;
try {
await nvmeof.exec("stat", [
path,
]);
return true;
} catch (err) {
return false;
}
}
async nativeMultipathEnabled() {
const nvmeof = this;
let result;
try {
result = await nvmeof.exec("cat", [
"/sys/module/nvme_core/parameters/multipath",
]);
} catch (err) {
if (err.code == 1 && err.stderr.includes("No such file or directory")) {
return false;
}
throw err;
}
return result.stdout.trim() == "Y";
}
async namespaceDevicePathByTransportNQNNamespace(transport, nqn, namespace) {
const nvmeof = this;
transport = await nvmeof.parseTransport(transport);
let nativeMultipathEnabled = await nvmeof.nativeMultipathEnabled();
if (nativeMultipathEnabled) {
let subsystem = await nvmeof.getSubsystemByNQN(nqn);
if (subsystem) {
for (let i_namespace of subsystem.Namespaces) {
if (i_namespace.NSID != namespace) {
continue;
} else {
return `/dev/${i_namespace.NameSpace}`;
}
}
}
} else {
let controller = await nvmeof.getControllerByTransportNQN(transport, nqn);
if (controller) {
for (let i_namespace of controller.Namespaces) {
if (i_namespace.NSID != namespace) {
continue;
} else {
return `/dev/${i_namespace.NameSpace}`;
}
}
}
}
}
async controllerDevicePathByTransportNQN(transport, nqn) {
const nvmeof = this;
transport = await nvmeof.parseTransport(transport);
let controller = await nvmeof.getControllerByTransportNQN(transport, nqn);
if (controller) {
return `/dev/${controller.Controller}`;
}
}
async getSubsystems() {
const nvmeof = this;
let result = await nvmeof.list(["-v"]);
return nvmeof.getNormalizedSubsystems(result);
}
/**
* used to normalize subsystem list/response across different versions of nvme-cli
*
* @param {*} result
* @returns
*/
async getNormalizedSubsystems(result) {
let subsystems = [];
for (let device of result.Devices) {
if (Array.isArray(device.Subsystems)) {
subsystems = subsystems.concat(device.Subsystems);
} else if (device.Subsystem) {
// nvme-cli 1.x support
subsystems.push(device);
}
}
return subsystems;
}
async getSubsystemByNQN(nqn) {
const nvmeof = this;
const subsystems = await nvmeof.getSubsystems();
for (let subsystem of subsystems) {
if (subsystem.SubsystemNQN == nqn) {
return subsystem;
}
}
nvmeof.logger.warn(`failed to find subsystem for nqn: ${nqn}`);
}
async getControllersByNamespaceDeviceName(name) {
const nvmeof = this;
name = name.replace("/dev/", "");
let nativeMultipathEnabled = await nvmeof.nativeMultipathEnabled();
const subsystems = await nvmeof.getSubsystems();
if (nativeMultipathEnabled) {
// using per-subsystem namespace
for (let subsystem of subsystems) {
if (subsystem.Namespaces) {
for (let namespace of subsystem.Namespaces) {
if (namespace.NameSpace == name) {
return subsystem.Controllers;
}
}
}
}
} else {
// using per-controller namespace
for (let subsystem of subsystems) {
if (subsystem.Controllers) {
for (let controller of subsystem.Controllers) {
if (controller.Namespaces) {
for (let namespace of controller.Namespaces) {
if (namespace.NameSpace == name) {
return subsystem.Controllers;
}
}
}
}
}
}
}
nvmeof.logger.warn(`failed to find controllers for device: ${name}`);
return [];
}
async getControllerByTransportNQN(transport, nqn) {
const nvmeof = this;
transport = await nvmeof.parseTransport(transport);
let subsystem = await nvmeof.getSubsystemByNQN(nqn);
if (subsystem) {
for (let controller of subsystem.Controllers) {
if (controller.Transport != transport.type) {
continue;
}
let controllerAddress = controller.Address;
/**
* For backwards compatibility with older nvme-cli versions (at least < 2.2.1)
* old: "Address":"traddr=127.0.0.1 trsvcid=4420"
* new: "Address":"traddr=127.0.0.1,trsvcid=4420"
*/
controllerAddress = controllerAddress.replace(
new RegExp(/ ([a-z_]*=)/, "g"),
",$1"
);
let parts = controllerAddress.split(",");
let traddr;
let trsvcid;
for (let i_part of parts) {
let i_parts = i_part.split("=");
switch (i_parts[0].trim()) {
case "traddr":
traddr = i_parts[1].trim();
break;
case "trsvcid":
trsvcid = i_parts[1].trim();
break;
}
}
if (traddr != transport.address) {
continue;
}
if (transport.service && trsvcid != transport.service) {
continue;
}
return controller;
}
}
nvmeof.logger.warn(
`failed to find controller for transport: ${JSON.stringify(
transport
)}, nqn: ${nqn}`
);
}
async nqnByNamespaceDeviceName(name) {
const nvmeof = this;
name = name.replace("/dev/", "");
let nativeMultipathEnabled = await nvmeof.nativeMultipathEnabled();
const subsystems = await nvmeof.getSubsystems();
if (nativeMultipathEnabled) {
// using per-subsystem namespace
for (let subsystem of subsystems) {
if (subsystem.Namespaces) {
for (let namespace of subsystem.Namespaces) {
if (namespace.NameSpace == name) {
return subsystem.SubsystemNQN;
}
}
}
}
} else {
// using per-controller namespace
for (let subsystem of subsystems) {
if (subsystem.Controllers) {
for (let controller of subsystem.Controllers) {
if (controller.Namespaces) {
for (let namespace of controller.Namespaces) {
if (namespace.NameSpace == name) {
return subsystem.SubsystemNQN;
}
}
}
}
}
}
}
nvmeof.logger.warn(`failed to find nqn for device: ${name}`);
}
devicePathByModelNumberSerialNumber(modelNumber, serialNumber) {
modelNumber = modelNumber.replaceAll(" ", "_");
serialNumber = serialNumber.replaceAll(" ", "_");
return `/dev/disk/by-id/nvme-${modelNumber}_${serialNumber}`;
}
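/**
 * Illustrative (model/serial are hypothetical):
 *
 *   nvmeof.devicePathByModelNumberSerialNumber("TrueNAS NVMe Disk", "abc123");
 *   // -> "/dev/disk/by-id/nvme-TrueNAS_NVMe_Disk_abc123"
 */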
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}
const nvmeof = this;
args = args || [];
if (nvmeof.options.sudo) {
args.unshift(command);
command = nvmeof.options.paths.sudo;
}
nvmeof.logger.verbose(
"executing nvmeof command: %s %s",
command,
args.join(" ")
);
return new Promise((resolve, reject) => {
const child = nvmeof.options.executor.spawn(command, args, options);
let stdout = "";
let stderr = "";
child.stdout.on("data", function (data) {
stdout = stdout + data;
});
child.stderr.on("data", function (data) {
stderr = stderr + data;
});
child.on("close", function (code) {
const result = { code, stdout, stderr, timeout: false };
try {
result.parsed = JSON.parse(result.stdout);
} catch (err) {}
// timeout scenario
if (code === null) {
result.timeout = true;
reject(result);
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}
module.exports.NVMEoF = NVMEoF;

View File

@ -1,369 +0,0 @@
const cp = require("child_process");
const GeneralUtils = require("./general");
const DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
const EXIT_CODES = {
64: "administrator can not mount filesystems",
65: "unable to decrypt using passphrase",
78: "missing or invalid passphrase",
};
/**
* https://objectivefs.com/
*/
class ObjectiveFS {
constructor(options = {}) {
const objectivefs = this;
objectivefs.options = options;
options.paths = options.paths || {};
if (!options.paths.objectivefs) {
options.paths.objectivefs = "mount.objectivefs";
}
if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}
if (!options.env) {
options.env = {};
}
if (!options.executor) {
options.executor = {
spawn: cp.spawn,
//spawn: cp.execFile,
};
}
}
/**
* mount.objectivefs [-o <opt>[,<opt>]..] <filesystem> <dir>
*
* @param {*} env
* @param {*} filesystem
* @param {*} target
* @param {*} options
*/
async mount(env, filesystem, target, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = [];
if (options.length > 0) {
// TODO: maybe do -o <opt> -o <opt>?
args = args.concat(["-o", options.join(",")]);
}
args = args.concat([filesystem, target]);
let result;
try {
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env, operation: "mount" }
);
return result;
} catch (err) {
throw err;
}
}
/**
* mount.objectivefs create <your filesystem name>
* mount.objectivefs create -f <bucket>/<fs>
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async create(env, filesystem, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = ["create"];
args = args.concat(options);
args = args.concat([filesystem]);
let result;
try {
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env }
);
return result;
} catch (err) {
if (err.code == 1 && err.stderr.includes("filesystem already exists")) {
return;
}
throw err;
}
}
/**
* echo 'y' | mount.objectivefs destroy <bucket>/<fs>
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async destroy(env, filesystem, options = []) {
const objectivefs = this;
if (!env) {
env = {};
}
filesystem = await objectivefs.stripObjectStoreFromFilesystem(filesystem);
/**
* delete safety checks for filesystem
*
* while it is possible to delete a fs without a pool we
* should never be doing that in democratic-csi
*/
let fs_parts = filesystem.split("/");
if (fs_parts.length != 2) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
if (!fs_parts[0]) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
let pool = objectivefs.options.pool;
pool = await objectivefs.stripObjectStoreFromFilesystem(pool);
if (!pool) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
if (fs_parts[0].trim() != pool.trim()) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
if (!fs_parts[1]) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
let args = ["destroy"];
args = args.concat(options);
args = args.concat([filesystem]);
let result;
try {
result = await objectivefs.exec(
"/bin/bash",
[
"-c",
`echo y | ${objectivefs.options.paths.objectivefs} ${args.join(" ")}`,
],
{ env }
);
return result;
} catch (err) {
if (
err.code == 68 &&
err.stdout.includes("does not look like an ObjectiveFS filesystem")
) {
return;
}
throw err;
}
}
parseListOutput(data) {
const lines = data.split("\n");
let headers = [];
let entries = [];
lines.forEach((line, i) => {
if (line.length < 1) {
return;
}
const parts = line.split("\t");
if (i == 0) {
headers = parts.map((header) => {
return header.trim();
});
return;
}
let entry = {};
headers.forEach((name, index) => {
entry[name.trim()] = parts[index].trim();
});
entries.push(entry);
});
return entries;
}
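/**
 * Illustrative (headers and values are hypothetical), given the tab-separated
 * output of `mount.objectivefs list`:
 *
 *   ofs.parseListOutput("NAME\tKIND\nmy-pool/pvc-123\tofs\n");
 *   // -> [{ NAME: "my-pool/pvc-123", KIND: "ofs" }]
 */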
/**
* mount.objectivefs list [-asvz] [<filesystem>[@<time>]]
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async list(env, filesystem = null, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = ["list"];
args = args.concat(options);
if (filesystem) {
args = args.concat([filesystem]);
}
let result;
try {
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env }
);
return objectivefs.parseListOutput(result.stdout);
} catch (err) {
throw err;
}
}
/**
* mount.objectivefs snapshot <filesystem>
*
* NOTE: the filesystem must be mounted on the node for this to work
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async snapshot(env, filesystem = null, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = ["list"];
args = args.concat(options);
if (filesystem) {
args = args.concat([filesystem]);
}
let result;
try {
// NOTE: Successfully created snapshot: minio://ofs/test@2024-02-13T07:56:38Z (2024-02-13T00:56:38)
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env }
);
return result;
} catch (err) {
throw err;
}
}
async getObjectStoreFromFilesystem(filesystem) {
if (filesystem.includes("://")) {
return GeneralUtils.before_string(filesystem, "://") + "://";
}
}
async stripObjectStoreFromFilesystem(filesystem) {
if (filesystem.includes("://")) {
return GeneralUtils.after_string(filesystem, "://");
}
return filesystem;
}
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}
const objectivefs = this;
args = args || [];
if (objectivefs.options.sudo) {
args.unshift(command);
command = objectivefs.options.paths.sudo;
}
options.env = { ...{}, ...objectivefs.options.env, ...options.env };
// truncate admin key during mount operations
if (options.operation == "mount") {
delete options.operation;
// a standard license key is 24 chars
// an admin key appends 8 more chars, which we strip here
if (
options.env.OBJECTIVEFS_LICENSE &&
options.env.OBJECTIVEFS_LICENSE.length > 24
) {
options.env.OBJECTIVEFS_LICENSE =
options.env.OBJECTIVEFS_LICENSE.substr(0, 24);
}
}
options.env.PATH = process.env.PATH;
const cleansedLog = `${command} ${args.join(" ")}`;
console.log("executing objectivefs command: %s", cleansedLog);
//console.log(options.env);
return new Promise((resolve, reject) => {
let stdin;
if (options.stdin) {
stdin = options.stdin;
delete options.stdin;
}
const child = objectivefs.options.executor.spawn(command, args, options);
if (stdin) {
child.stdin.write(stdin);
}
let stdout = "";
let stderr = "";
child.stdout.on("data", function (data) {
stdout = stdout + data;
});
child.stderr.on("data", function (data) {
stderr = stderr + data;
});
child.on("close", function (code) {
if (!stderr && EXIT_CODES[code]) {
stderr += EXIT_CODES[code];
}
const result = { code, stdout, stderr, timeout: false };
// timeout scenario (the child was killed and exited without a code)
if (code === null) {
result.timeout = true;
reject(result);
return;
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}
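// example usage (a minimal sketch; the option values and require path below
// are assumptions, only the option names are taken from how this class uses them):
//
// const { ObjectiveFS } = require("./objectivefs");
// const ofs = new ObjectiveFS({
//   pool: "minio://ofs",
//   paths: { objectivefs: "/sbin/mount.objectivefs", sudo: "/usr/bin/sudo" },
//   env: { OBJECTIVEFS_LICENSE: "..." },
//   executor: { spawn: require("child_process").spawn },
// });
// const entries = await ofs.list({ OBJECTIVEFS_PASSPHRASE: "..." });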
module.exports.ObjectiveFS = ObjectiveFS;

View File

@ -3,7 +3,3 @@ if (typeof String.prototype.replaceAll == "undefined") {
return this.replace(new RegExp(match, "g"), () => replace);
};
}
Array.prototype.random = function () {
return this[Math.floor(Math.random() * this.length)];
};

View File

@ -48,4 +48,6 @@ class Registry {
}
}
module.exports.Registry = Registry;
const registry = new Registry();
module.exports = registry;

View File

@ -1,494 +0,0 @@
const _ = require("lodash");
const cp = require("child_process");
const DEFAULT_TIMEOUT = process.env.RESTIC_DEFAULT_TIMEOUT || 90000;
/**
* https://restic.net/
*/
class Restic {
constructor(options = {}) {
const restic = this;
restic.options = options;
options.paths = options.paths || {};
if (!options.paths.restic) {
options.paths.restic = "restic";
}
if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}
if (!options.env) {
options.env = {};
}
if (!options.executor) {
options.executor = {
spawn: cp.spawn,
};
}
if (!options.logger) {
options.logger = console;
}
if (!options.global_flags) {
options.global_flags = [];
}
}
/**
* restic init
*
* @param {*} options
*/
async init(options = []) {
const restic = this;
let args = ["init", "--json"];
args = args.concat(restic.options.global_flags);
args = args.concat(options);
try {
await restic.exec(restic.options.paths.restic, args);
return;
} catch (err) {
if (err.code == 1 && err.stderr.includes("already")) {
return;
}
throw err;
}
}
/**
* restic unlock
*
* @param {*} options
*/
async unlock(options = []) {
const restic = this;
let args = ["unlock", "--json"];
args = args.concat(restic.options.global_flags);
args = args.concat(options);
try {
await restic.exec(restic.options.paths.restic, args);
return;
} catch (err) {
throw err;
}
}
/**
* restic backup
*
* @param {*} path
* @param {*} options
*/
async backup(path, options = []) {
const restic = this;
let args = [];
args = args.concat(["backup", "--json"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
args = args.concat([path]);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "backup",
timeout: 0,
});
return result;
} catch (err) {
throw err;
}
}
/**
* restic tag
*
* @param {*} options
*/
async tag(options = []) {
const restic = this;
let args = [];
args = args.concat(["tag", "--json"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "tag",
});
return result;
} catch (err) {
throw err;
}
}
/**
* restic snapshots
*
* @param {*} options
*/
async snapshots(options = []) {
const restic = this;
let args = [];
args = args.concat(["snapshots", "--json", "--no-lock"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
restic.parseTagsFromArgs(args);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "snapshots",
});
let snapshots = [];
result.parsed.forEach((item) => {
if (item.id) {
snapshots.push(item);
}
if (item.snapshots) {
snapshots.push(...item.snapshots);
}
});
return snapshots;
} catch (err) {
throw err;
}
}
/**
* check whether a snapshot with the given id exists
*
* @param {*} snapshot_id
*/
async snapshot_exists(snapshot_id) {
const restic = this;
const snapshots = await restic.snapshots([snapshot_id]);
return snapshots.length > 0;
}
/**
* restic forget
*
* @param {*} options
*/
async forget(options = []) {
const restic = this;
let args = [];
args = args.concat(["forget", "--json"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "forget",
});
return result.parsed;
} catch (err) {
if (err.code == 1 && err.stderr.includes("no such file or directory")) {
return [];
}
throw err;
}
}
/**
* restic stats
*
* @param {*} options
*/
async stats(options = []) {
const restic = this;
let args = [];
args = args.concat(["stats", "--json", "--no-lock"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "stats",
timeout: 0, // can take a very long time to gather up details
});
return result.parsed;
} catch (err) {
throw err;
}
}
/**
* restic restore
*
* note that restore does not do any delete operations (ie: not like rsync --delete)
*
* @param {*} options
*/
async restore(options = []) {
const restic = this;
let args = ["restore", "--json", "--no-lock"];
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "restore",
timeout: 0,
});
return result.parsed;
} catch (err) {
if (err.code == 1 && err.stderr.includes("Fatal:")) {
const lines = err.stderr.split("\n").filter((item) => {
return Boolean(String(item).trim());
});
const last_line = lines[lines.length - 1];
const ignored_count = (err.stderr.match(/ignoring error/g) || [])
.length;
restic.options.logger.info(
`restic ignored error count: ${ignored_count}`
);
restic.options.logger.info(`restic stderr last line: ${last_line}`);
// if the ignored count matches the total count move on
// "Fatal: There were 2484 errors"
if (last_line.includes(String(ignored_count))) {
return err;
}
}
throw err;
}
}
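/**
* trim stdout/stderr/parsed down to the last `max_entries` (default 50)
* lines/entries to keep result payloads manageable
*
* @param {*} result
* @param {*} options
*/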
trimResultData(result, options = {}) {
const trim_output_limit = options.max_entries || 50;
// trim stdout/stderr/parsed lines to the last N entries
if (result.parsed && Array.isArray(result.parsed)) {
result.parsed = result.parsed.slice(trim_output_limit * -1);
}
result.stderr = result.stderr
.split("\n")
.slice(trim_output_limit * -1)
.join("\n");
result.stdout = result.stdout
.split("\n")
.slice(trim_output_limit * -1)
.join("\n");
return result;
}
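/**
* collect the values of any `--tag` flags from an args array, splitting
* comma-separated values (ie: ["--tag", "a,b"] -> ["a", "b"])
*
* @param {*} args
*/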
parseTagsFromArgs(args) {
let tag_value_index;
let tags = args.filter((value, index) => {
if (String(value) == "--tag") {
tag_value_index = index + 1;
}
return tag_value_index == index;
});
tags = tags
.map((value) => {
if (value.includes(",")) {
return value.split(",");
}
return [value];
})
.flat();
return tags;
}
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}
const restic = this;
args = args || [];
if (restic.options.sudo) {
args.unshift(command);
command = restic.options.paths.sudo;
}
options.env = {
...{},
...process.env,
...restic.options.env,
...options.env,
};
const cleansedLog = `${command} ${args.join(" ")}`;
console.log("executing restic command: %s", cleansedLog);
return new Promise((resolve, reject) => {
let stdin;
if (options.stdin) {
stdin = options.stdin;
delete options.stdin;
}
const child = restic.options.executor.spawn(command, args, options);
if (stdin) {
child.stdin.write(stdin);
}
let stdout = "";
let stderr = "";
let code_override;
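// debounced progress logger: logs at most once every 250ms (with a 5s
// maxWait) to avoid flooding the logs with restic's per-file status stream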
const log_progress_output = _.debounce(
(data) => {
let snapshot_id;
let path;
switch (options.operation) {
case "backup":
snapshot_id = `unknown_creating_new_snapshot_in_progress`;
path = args[args.length - 1];
break;
case "restore":
snapshot_id = args
.find((value) => {
return String(value).includes(":");
})
.split(":")[0];
let path_index;
path = args.find((value, index) => {
if (String(value) == "--target") {
path_index = index + 1;
}
return path_index == index;
});
break;
default:
return;
}
if (data.message_type == "status") {
delete data.current_files;
restic.options.logger.info(
`restic ${options.operation} progress: snapshot_id=${snapshot_id}, path=${path}`,
data
);
}
if (data.message_type == "summary") {
restic.options.logger.info(
`restic ${options.operation} summary: snapshot_id=${snapshot_id}, path=${path}`,
data
);
}
},
250,
{ leading: true, trailing: true, maxWait: 5000 }
);
child.stdout.on("data", function (data) {
data = String(data);
stdout += data;
switch (options.operation) {
case "backup":
case "restore":
try {
let parsed = JSON.parse(data);
log_progress_output(parsed);
} catch (err) {
// ignore partial/non-JSON progress chunks
}
break;
}
});
child.stderr.on("data", function (data) {
data = String(data);
stderr += data;
if (
["forget", "snapshots"].includes(options.operation) &&
stderr.includes("no such file or directory")
) {
// short-circuit the operation vs waiting for all the retries
// https://github.com/restic/restic/pull/2515
switch (options.operation) {
case "forget":
code_override = 1;
break;
case "snapshots":
code_override = 0;
break;
}
child.kill();
}
});
child.on("close", function (code) {
const result = { code, stdout, stderr, timeout: false };
// first attempt to parse stdout as a single JSON document
if (!result.parsed) {
try {
result.parsed = JSON.parse(result.stdout);
} catch (err) {
// not a single document, fall through to line-delimited parsing
}
}
// otherwise attempt to parse stdout as newline-delimited JSON
if (!result.parsed) {
try {
const lines = result.stdout.split("\n");
const parsed = [];
lines.forEach((line) => {
if (!line) {
return;
}
parsed.push(JSON.parse(line.trim()));
});
result.parsed = parsed;
} catch (err) {
// leave parsed unset when the output is not JSON
}
}
/**
* normalize to an array in scenarios where a single JSON object came
* through without a trailing newline
*/
if (result.parsed && options.operation == "backup") {
if (!Array.isArray(result.parsed)) {
result.parsed = [result.parsed];
}
}
if (code == null && code_override != null) {
code = code_override;
}
// timeout scenario (the child was killed and exited without a code)
if (code === null) {
result.timeout = true;
reject(result);
return;
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}
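// example usage (a minimal sketch; the repository/credentials and require
// path shown are assumptions, supplied via the standard restic env vars):
//
// const { Restic } = require("./restic");
// const restic = new Restic({
//   env: { RESTIC_REPOSITORY: "s3:...", RESTIC_PASSWORD: "..." },
//   global_flags: ["--no-cache"],
// });
// await restic.init();
// const result = await restic.backup("/data", ["--tag", "democratic-csi"]);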
module.exports.Restic = Restic;

View File

@ -118,9 +118,6 @@ class Windows {
// -UseWriteThrough $true
// cannot have trailing slash nor a path
// must be \\<server>\<share>
//
// https://github.com/kubernetes-csi/csi-driver-smb/issues/219#issuecomment-781952587
// -Persistent $false
remotePath = this.uncPathToShare(remotePath);
command =
"$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential -RequirePrivacy $true";