Merge pull request #378 from democratic-csi/next

Next
This commit is contained in:
Travis Glenn Hansen 2024-03-26 11:29:06 -06:00 committed by GitHub
commit a6dec24a70
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
57 changed files with 10959 additions and 361 deletions

View File

@ -16,6 +16,7 @@ if [[ -n "${IMAGE_TAG}" ]]; then
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \
--build-arg OBJECTIVEFS_DOWNLOAD_ID=${OBJECTIVEFS_DOWNLOAD_ID} \
.
else
:

View File

@ -15,23 +15,23 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.11.0
uses: styfle/cancel-workflow-action@0.12.1
with:
access_token: ${{ github.token }}
build-npm-linux-amd64:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 16
node-version: 20
- shell: bash
name: npm install
run: |
ci/bin/build.sh
- name: upload build
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: node-modules-linux-amd64
path: node_modules-linux-amd64.tar.gz
@ -40,16 +40,16 @@ jobs:
build-npm-windows-amd64:
runs-on: windows-2022
steps:
- uses: actions/checkout@v3
- uses: actions/setup-node@v3
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 16
node-version: 20
- shell: pwsh
name: npm install
run: |
ci\bin\build.ps1
- name: upload build
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: node-modules-windows-amd64
path: node_modules-windows-amd64.tar.gz
@ -69,8 +69,8 @@ jobs:
- X64
- csi-sanity-synology
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -99,8 +99,8 @@ jobs:
- X64
- csi-sanity-synology
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -115,17 +115,18 @@ jobs:
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
csi-sanity-truenas-scale-22_12:
csi-sanity-truenas-scale-24_04:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- truenas/scale/22.12/scale-iscsi.yaml
- truenas/scale/22.12/scale-nfs.yaml
- truenas/scale/24.04/scale-iscsi.yaml
- truenas/scale/24.04/scale-nfs.yaml
# 80 char limit
- truenas/scale/22.12/scale-smb.yaml
- truenas/scale/24.04/scale-smb.yaml
runs-on:
- self-hosted
- Linux
@ -133,8 +134,8 @@ jobs:
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -143,39 +144,7 @@ jobs:
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_22_12_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
csi-sanity-truenas-scale-23_10:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- truenas/scale/23.10/scale-iscsi.yaml
- truenas/scale/23.10/scale-nfs.yaml
# 80 char limit
- truenas/scale/23.10/scale-smb.yaml
runs-on:
- self-hosted
- Linux
- X64
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_23_10_HOST }}
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_24_04_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
@ -185,6 +154,7 @@ jobs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- truenas/core/13.0/core-iscsi.yaml
@ -198,8 +168,8 @@ jobs:
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -218,6 +188,7 @@ jobs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- zfs-generic/iscsi.yaml
@ -230,8 +201,8 @@ jobs:
- X64
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -244,6 +215,45 @@ jobs:
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
# client drivers
csi-sanity-objectivefs:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- objectivefs/objectivefs.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
OBJECTIVEFS_POOL: ${{ secrets.SANITY_OBJECTIVEFS_POOL }}
OBJECTIVEFS_LICENSE: ${{ secrets.SANITY_OBJECTIVEFS_LICENSE }}
OBJECTIVEFS_OBJECTSTORE: ${{ secrets.SANITY_OBJECTIVEFS_OBJECTSTORE }}
OBJECTIVEFS_ENDPOINT_PROTOCOL: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PROTOCOL }}
OBJECTIVEFS_ENDPOINT_HOST: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_HOST }}
OBJECTIVEFS_ENDPOINT_PORT: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PORT }}
OBJECTIVEFS_SECRET_KEY: ${{ secrets.SANITY_OBJECTIVEFS_SECRET_KEY }}
OBJECTIVEFS_ACCESS_KEY: ${{ secrets.SANITY_OBJECTIVEFS_ACCESS_KEY }}
OBJECTIVEFS_PASSPHRASE: ${{ secrets.SANITY_OBJECTIVEFS_PASSPHRASE }}
# these secrets need to match the above secrets for staging/etc
CSI_SANITY_SECRETS: /root/csi-secrets/objectivefs-secrets.yaml
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
# client drivers
csi-sanity-client:
needs:
@ -260,8 +270,8 @@ jobs:
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -288,8 +298,8 @@ jobs:
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-windows-amd64
- name: csi-sanity
@ -318,8 +328,8 @@ jobs:
- X64
- csi-sanity-zfs-local
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
@ -357,8 +367,8 @@ jobs:
- X64
- csi-sanity-local-hostpath
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: ${{ matrix.npmartifact }}
- name: csi-sanity
@ -381,8 +391,8 @@ jobs:
- Windows
- X64
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-windows-amd64
- name: csi-sanity
@ -425,10 +435,10 @@ jobs:
- determine-image-tag
- csi-sanity-synology-dsm6
- csi-sanity-synology-dsm7
- csi-sanity-truenas-scale-22_12
- csi-sanity-truenas-scale-23_10
- csi-sanity-truenas-scale-24_04
- csi-sanity-truenas-core-13_0
- csi-sanity-zfs-generic
- csi-sanity-objectivefs
- csi-sanity-client
- csi-sanity-client-windows
- csi-sanity-zfs-local
@ -436,7 +446,7 @@ jobs:
- csi-sanity-windows-node
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: docker build
run: |
export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
@ -456,6 +466,7 @@ jobs:
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
OBJECTIVEFS_DOWNLOAD_ID: ${{ secrets.OBJECTIVEFS_DOWNLOAD_ID }}
DOCKER_CLI_EXPERIMENTAL: enabled
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}
@ -464,10 +475,10 @@ jobs:
needs:
- csi-sanity-synology-dsm6
- csi-sanity-synology-dsm7
- csi-sanity-truenas-scale-22_12
- csi-sanity-truenas-scale-23_10
- csi-sanity-truenas-scale-24_04
- csi-sanity-truenas-core-13_0
- csi-sanity-zfs-generic
- csi-sanity-objectivefs
- csi-sanity-client
- csi-sanity-client-windows
- csi-sanity-zfs-local
@ -487,7 +498,7 @@ jobs:
nano_base_tag: ltsc2022
file: Dockerfile.Windows
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- name: docker build
shell: bash
run: |
@ -499,7 +510,7 @@ jobs:
docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
- name: upload image tar
uses: actions/upload-artifact@v3
uses: actions/upload-artifact@v4
with:
name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
@ -514,11 +525,11 @@ jobs:
- self-hosted
- buildah
steps:
- uses: actions/checkout@v3
- uses: actions/download-artifact@v3
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: democratic-csi-windows-ltsc2019.tar
- uses: actions/download-artifact@v3
- uses: actions/download-artifact@v4
with:
name: democratic-csi-windows-ltsc2022.tar
- name: push windows images with buildah

View File

@ -1,10 +1,29 @@
# v1.9.0
Released 2024-03-26
- new `objectivefs` driver (https://objectivefs.com) support available for x86_64 and arm64
- TrueNAS
- SCALE 24.04 support
- fix `sudo` issue during resize operations (see #295)
- fix version detection logic and default to api version 2 (see #351)
- more robust `Probe` implementation
- container images
- various fixes, improvements, dep upgrades, etc
- update container images to `debian:12` (bookworm)
- bump to nodejs-lts-iron from nodejs-lts-hydrogen
- support csi v1.6.0-v1.9.0
- allow `noop` delete operations (dangerous, only use if you _really_ know what you are doing, see #289)
- properly adhere to the `zvolDedup` and `zvolCompression` settings (see #322)
- `restic` and `kopia` support as a snapshot solution for `local-hostpath` and `*-client` drivers
# v1.8.4
Released 2023-11-09
- allow templatized `volume_id` (dangerous, only use if you *really* know what you are doing)
- fix SCALE iscsi resize issue
- SCALE 23.10 support
- allow templatized `volume_id` (dangerous, only use if you _really_ know what you are doing)
- fix TrueNAS SCALE iscsi resize issue
- TrueNAS SCALE 23.10 support
- minor improvements/fixes throughout
- dependency updates

View File

@ -1,4 +1,4 @@
FROM debian:11-slim AS build
FROM debian:12-slim AS build
#FROM --platform=$BUILDPLATFORM debian:10-slim AS build
ENV DEBIAN_FRONTEND=noninteractive
@ -9,14 +9,14 @@ ARG BUILDPLATFORM
RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM"
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8
ENV NODE_VERSION=v16.18.0
ENV NODE_VERSION=v20.11.1
ENV NODE_ENV=production
# install build deps
RUN apt-get update && apt-get install -y python make cmake gcc g++
RUN apt-get update && apt-get install -y python3 make cmake gcc g++
# install node
RUN apt-get update && apt-get install -y wget xz-utils
@ -26,8 +26,8 @@ ENV PATH=/usr/local/lib/nodejs/bin:$PATH
# Run as a non-root user
RUN useradd --create-home csi \
&& mkdir /home/csi/app \
&& chown -R csi: /home/csi
&& mkdir /home/csi/app \
&& chown -R csi: /home/csi
WORKDIR /home/csi/app
USER csi
@ -40,31 +40,33 @@ RUN rm -rf docker
######################
# actual image
######################
FROM debian:11-slim
FROM debian:12-slim
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.licenses MIT
ENV DEBIAN_FRONTEND=noninteractive
ENV DEMOCRATIC_CSI_IS_CONTAINER=true
ARG TARGETPLATFORM
ARG BUILDPLATFORM
ARG OBJECTIVEFS_DOWNLOAD_ID
RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM"
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8
ENV NODE_ENV=production
# Workaround for https://github.com/nodejs/node/issues/37219
RUN test $(uname -m) != armv7l || ( \
apt-get update \
&& apt-get install -y libatomic1 \
&& rm -rf /var/lib/apt/lists/* \
)
apt-get update \
&& apt-get install -y libatomic1 \
&& rm -rf /var/lib/apt/lists/* \
)
# install node
#ENV PATH=/usr/local/lib/nodejs/bin:$PATH
@ -75,14 +77,31 @@ COPY --from=build /usr/local/lib/nodejs/bin/node /usr/local/bin/node
# netbase is required by rpcbind/rpcinfo to work properly
# /etc/{services,rpc} are required
RUN apt-get update && \
apt-get install -y netbase socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli && \
rm -rf /var/lib/apt/lists/*
apt-get install -y wget netbase zip bzip2 socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli fuse3 && \
rm -rf /var/lib/apt/lists/*
ARG RCLONE_VERSION=1.66.0
ADD docker/rclone-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/rclone-installer.sh && rclone-installer.sh
ARG RESTIC_VERSION=0.16.4
ADD docker/restic-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/restic-installer.sh && restic-installer.sh
ARG KOPIA_VERSION=0.16.1
ADD docker/kopia-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/kopia-installer.sh && kopia-installer.sh
# controller requirements
#RUN apt-get update && \
# apt-get install -y ansible && \
# rm -rf /var/lib/apt/lists/*
# install objectivefs
ARG OBJECTIVEFS_VERSION=7.2
ADD docker/objectivefs-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/objectivefs-installer.sh && objectivefs-installer.sh
# install wrappers
ADD docker/iscsiadm /usr/local/sbin
RUN chmod +x /usr/local/sbin/iscsiadm
@ -90,6 +109,9 @@ RUN chmod +x /usr/local/sbin/iscsiadm
ADD docker/multipath /usr/local/sbin
RUN chmod +x /usr/local/sbin/multipath
ADD docker/simple-file-writer /usr/local/bin
RUN chmod +x /usr/local/bin/simple-file-writer
## USE_HOST_MOUNT_TOOLS=1
ADD docker/mount /usr/local/bin/mount
RUN chmod +x /usr/local/bin/mount
@ -107,7 +129,7 @@ RUN chmod +x /usr/local/bin/oneclient
# Run as a non-root user
RUN useradd --create-home csi \
&& chown -R csi: /home/csi
&& chown -R csi: /home/csi
COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app

View File

@ -30,6 +30,7 @@ have access to resizing, snapshots, clones, etc functionality.
- `zfs-local-dataset` (provision node-local volume as dataset)
- `zfs-local-zvol` (provision node-local volume as zvol)
- `synology-iscsi` experimental (manages volumes to share over iscsi)
- `objectivefs` experimental (manages objectivefs volumes)
- `lustre-client` (crudely provisions storage using a shared lustre
share/directory for all volumes)
- `nfs-client` (crudely provisions storage using a shared nfs share/directory
@ -188,8 +189,10 @@ node:
and continue your democratic-csi installation as usual with other iscsi drivers.
#### Privileged Namespace
democratic-csi requires privileged access to the nodes, so the namespace should allow for privileged pods. One way of doing it is via [namespace labels](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/).
Add the following label to the democratic-csi installation namespace `pod-security.kubernetes.io/enforce=privileged`
```
kubectl label --overwrite namespace democratic-csi pod-security.kubernetes.io/enforce=privileged
```
@ -333,8 +336,9 @@ with much older versions as well.
The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be
used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh`
connections and do all operations entirely with the TrueNAS api. With that in
mind, any ssh/shell/etc requirements below can be safely ignored. Also note the
following known issues:
mind, any ssh/shell/etc requirements below can be safely ignored. The minimum
volume size through the api is `1G` so beware that requested volumes with a
smaller size will be increased to `1G`. Also note the following known issues:
- https://jira.ixsystems.com/browse/NAS-111870
- https://github.com/democratic-csi/democratic-csi/issues/112
@ -534,6 +538,26 @@ saveconfig /etc/nvmet/config.json
Ensure iscsi manager has been installed and is generally setup/configured. DSM 6.3+ is supported.
### objectivefs (objectivefs)
ObjectiveFS requires the use of an _Admin Key_ to properly automate the
lifecycle of filesystems. Each deployment of the driver will point to a single
`pool` (bucket) and create individual `filesystems` within that bucket
on-demand.
Ensure the config value used for `pool` is an existing bucket. Be sure the
bucket is _NOT_ being used in fs mode (ie: the whole bucket is a single fs).
The `democratic-csi` `node` container will host the fuse mount process so
be careful to only upgrade when all relevant workloads have been drained from
the respective node. Also beware that any cpu/memory limits placed on the
container by the orchestration system will impact any ability to use the
caching, etc features of objectivefs.
- https://objectivefs.com/howto/objectivefs-admin-key-setup
- https://objectivefs.com/features#filesystem-pool
- https://objectivefs.com/howto/how-to-create-a-filesystem-with-an-existing-empty-bucket
## Helm Installation
```bash
@ -648,12 +672,6 @@ Copy the `contrib/freenas-provisioner-to-democratic-csi.sh` script from the
project to your workstation, read the script in detail, and edit the variables
to your needs to start migrating!
# Sponsors
A special shout out to the wonderful sponsors of the project!
[![ixSystems](https://www.ixsystems.com/wp-content/uploads/2021/06/ix_logo_200x47.png "ixSystems")](http://ixsystems.com/)
# Related
- https://github.com/nmaupu/freenas-provisioner

View File

@ -63,6 +63,10 @@ const args = require("yargs")
"1.3.0",
"1.4.0",
"1.5.0",
"1.6.0",
"1.7.0",
"1.8.0",
"1.9.0",
],
})
.demandOption(["csi-version"], "csi-version is required")
@ -103,6 +107,7 @@ if (!args.serverSocket && !args.serverAddress && !args.serverPort) {
}
//console.log(args);
//console.log(process.env);
const package = require("../package.json");
args.version = package.version;
@ -397,10 +402,58 @@ logger.info(
bindSocket
);
[`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`].forEach(
const signalMapping = {
1: "SIGHUP",
2: "SIGINT",
3: "SIGQUIT",
4: "SIGILL",
5: "SIGTRAP",
6: "SIGABRT",
7: "SIGEMT",
8: "SIGFPE",
9: "SIGKILL",
10: "SIGBUS",
11: "SIGSEGV",
12: "SIGSYS",
13: "SIGPIPE",
14: "SIGALRM",
15: "SIGTERM",
16: "SIGURG",
17: "SIGSTOP",
18: "SIGTSTP",
19: "SIGCONT",
20: "SIGCHLD",
21: "SIGTTIN",
22: "SIGTTOU",
23: "SIGIO",
24: "SIGXCPU",
25: "SIGXFSZ",
26: "SIGVTALRM",
27: "SIGPROF",
28: "SIGWINCH",
29: "SIGINFO",
30: "SIGUSR1",
31: "SIGUSR2",
};
[(`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`)].forEach(
(eventType) => {
process.on(eventType, async (code) => {
console.log(`running server shutdown, exit code: ${code}`);
let codeNumber = null;
let codeName = null;
if (code > 0) {
codeNumber = code;
codeName = signalMapping[code];
} else {
codeNumber = Object.keys(signalMapping).find(
(key) => signalMapping[key] === code
);
codeName = code;
}
console.log(
`running server shutdown, exit code: ${codeNumber} (${codeName})`
);
// attempt clean shutdown of in-flight requests
try {
@ -431,7 +484,7 @@ logger.info(
}
console.log("server fully shutdown, exiting");
process.exit(code);
process.exit(codeNumber);
});
}
);

View File

@ -34,6 +34,7 @@ $exeargs += "-csi.mountdir", "${env:CSI_SANITY_TEMP_DIR}\mnt"
$exeargs += "-csi.stagingdir", "${env:CSI_SANITY_TEMP_DIR}\stage"
$exeargs += "-csi.testvolumeexpandsize", "2147483648"
$exeargs += "-csi.testvolumesize", "1073741824"
$exeargs += "--csi.secrets", "${env:CSI_SANITY_SECRETS}"
$exeargs += "-ginkgo.skip", "${env:CSI_SANITY_SKIP}"
$exeargs += "-ginkgo.focus", "${env:CSI_SANITY_FOCUS}"

View File

@ -7,7 +7,7 @@ set -x
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
: ${CSI_SANITY_TEMP_DIR:=$(mktemp -d -t ci-csi-sanity-tmp-XXXXXXXX)}
if [[ ! -S "${CSI_ENDPOINT}" ]];then
if [[ ! -S "${CSI_ENDPOINT}" ]]; then
echo "csi socket: ${CSI_ENDPOINT} does not exist"
exit 1
fi
@ -15,27 +15,29 @@ fi
trap ctrl_c INT
function ctrl_c() {
echo "Trapped CTRL-C"
exit 1
echo "Trapped CTRL-C"
exit 1
}
chmod g+w,o+w "${CSI_ENDPOINT}";
mkdir -p "${CSI_SANITY_TEMP_DIR}";
rm -rf "${CSI_SANITY_TEMP_DIR}"/*;
chmod -R 777 "${CSI_SANITY_TEMP_DIR}";
chmod g+w,o+w "${CSI_ENDPOINT}"
mkdir -p "${CSI_SANITY_TEMP_DIR}"
rm -rf "${CSI_SANITY_TEMP_DIR}"/*
chmod -R 777 "${CSI_SANITY_TEMP_DIR}"
# https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity
# FOR DEBUG: --ginkgo.v
# --csi.secrets=<path to secrets file>
#
# expand size 2073741824 to have mis-alignments
# expand size 2147483648 to have everything line up nicely
csi-sanity --csi.endpoint "unix://${CSI_ENDPOINT}" \
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
--csi.testvolumeexpandsize 2147483648 \
--csi.testvolumesize 1073741824 \
-ginkgo.skip "${CSI_SANITY_SKIP}" \
-ginkgo.focus "${CSI_SANITY_FOCUS}"
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
--csi.testvolumeexpandsize 2147483648 \
--csi.testvolumesize 1073741824 \
--csi.secrets="${CSI_SANITY_SECRETS}" \
-ginkgo.skip "${CSI_SANITY_SKIP}" \
-ginkgo.focus "${CSI_SANITY_FOCUS}"
rm -rf "${CSI_SANITY_TEMP_DIR}"

View File

@ -8,7 +8,7 @@ Set-Location $env:PWD
Write-Output "launching server"
$env:LOG_LEVEL = "debug"
$env:CSI_VERSION = "1.5.0"
$env:CSI_VERSION = "1.9.0"
$env:CSI_NAME = "driver-test"
$env:CSI_SANITY = "1"

View File

@ -9,21 +9,21 @@ echo "current launch-server PATH: ${PATH}"
: ${CI_BUILD_KEY:="local"}
: ${TEMPLATE_CONFIG_FILE:=${1}}
: ${CSI_MODE:=""}
: ${CSI_VERSION:="1.5.0"}
: ${CSI_VERSION:="1.9.0"}
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
: ${LOG_PATH:=/tmp/csi-${CI_BUILD_KEY}.log}
if [[ "x${CONFIG_FILE}" == "x" ]];then
if [[ "x${CONFIG_FILE}" == "x" ]]; then
: ${CONFIG_FILE:=/tmp/csi-config-${CI_BUILD_KEY}.yaml}
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]];then
envsubst < "${TEMPLATE_CONFIG_FILE}" > "${CONFIG_FILE}"
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]]; then
envsubst <"${TEMPLATE_CONFIG_FILE}" >"${CONFIG_FILE}"
fi
fi
if [[ "x${CSI_MODE}" != "x" ]];then
if [[ "x${CSI_MODE}" != "x" ]]; then
EXTRA_ARGS="--csi-mode ${CSI_MODE} ${EXTRA_ARGS}"
fi
# > "${LOG_PATH}" 2>&1
# > "${LOG_PATH}" 2>&1
exec ./bin/democratic-csi --log-level debug --driver-config-file "${CONFIG_FILE}" --csi-version "${CSI_VERSION}" --csi-name "driver-test" --server-socket "${CSI_ENDPOINT}" ${EXTRA_ARGS}

View File

@ -0,0 +1,20 @@
driver: objectivefs
objectivefs:
pool: ${OBJECTIVEFS_POOL}
cli:
sudoEnabled: false
env:
OBJECTIVEFS_LICENSE: ${OBJECTIVEFS_LICENSE}
OBJECTSTORE: ${OBJECTIVEFS_OBJECTSTORE}
ENDPOINT: ${OBJECTIVEFS_ENDPOINT_PROTOCOL}://${OBJECTIVEFS_ENDPOINT_HOST}:${OBJECTIVEFS_ENDPOINT_PORT}
SECRET_KEY: ${OBJECTIVEFS_SECRET_KEY}
ACCESS_KEY: ${OBJECTIVEFS_ACCESS_KEY}
OBJECTIVEFS_PASSPHRASE: ${OBJECTIVEFS_PASSPHRASE}
_private:
csi:
volume:
idHash:
# max volume name length is 63
strategy: crc32

View File

@ -0,0 +1,38 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -0,0 +1,29 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -0,0 +1,50 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -17,25 +17,60 @@ SCRIPTDIR="$(
cd "${SCRIPTDIR}"
: "${NVMETCONFIG:="${SCRIPTDIR}/nvmet-config.json"}"
: "${NVMETVENV:="${SCRIPTDIR}/nvmet-venv"}"
export PATH=${HOME}/.local/bin:${PATH}
modules=()
modules+=("nvmet")
modules+=("nvmet-fc")
modules+=("nvmet-rdma")
modules+=("nvmet-tcp")
main() {
for module in "${modules[@]}"; do
modprobe "${module}"
done
which nvmetcli &>/dev/null || {
which pip &>/dev/null || {
wget -O get-pip.py https://bootstrap.pypa.io/get-pip.py
python get-pip.py --user
rm get-pip.py
kernel_modules
nvmetcli ls &>/dev/null || {
setup_venv
install_nvmetcli
}
nvmetcli_restore
}
kernel_modules() {
modules=()
modules+=("nvmet")
modules+=("nvmet-fc")
modules+=("nvmet-rdma")
modules+=("nvmet-tcp")
for module in "${modules[@]}"; do
modprobe "${module}"
done
}
setup_venv() {
rm -rf ${NVMETVENV}
python -m venv ${NVMETVENV} --without-pip --system-site-packages
activate_venv
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python get-pip.py
rm get-pip.py
deactivate_venv
}
activate_venv() {
. ${NVMETVENV}/bin/activate
}
deactivate_venv() {
deactivate
}
install_nvmetcli() {
if [[ ! -d nvmetcli ]]; then
git clone git://git.infradead.org/users/hch/nvmetcli.git
@ -43,19 +78,31 @@ which nvmetcli &>/dev/null || {
cd nvmetcli
# install to root home dir
python3 setup.py install --user
activate_venv
# install to root home dir
pip install configshell_fb --user
python3 setup.py install --install-scripts=${HOME}/.local/bin
# install to root home dir
pip install configshell_fb
# remove source
cd "${SCRIPTDIR}"
rm -rf nvmetcli
deactivate_venv
}
cd "${SCRIPTDIR}"
nvmetcli restore "${NVMETCONFIG}"
nvmetcli_restore() {
touch /var/run/nvmet-config-loaded
chmod +r /var/run/nvmet-config-loaded
activate_venv
cd "${SCRIPTDIR}"
nvmetcli restore "${NVMETCONFIG}"
deactivate_venv
touch /var/run/nvmet-config-loaded
chmod +r /var/run/nvmet-config-loaded
}
main

1636
csi_proto/csi-v1.6.0.proto Normal file

File diff suppressed because it is too large Load Diff

1636
csi_proto/csi-v1.7.0.proto Normal file

File diff suppressed because it is too large Load Diff

1856
csi_proto/csi-v1.8.0.proto Normal file

File diff suppressed because it is too large Load Diff

1914
csi_proto/csi-v1.9.0.proto Normal file

File diff suppressed because it is too large Load Diff

6
csi_proto/download-proto.sh Executable file
View File

@ -0,0 +1,6 @@
#!/bin/bash
# v1.6.0
VERSION=${1}
curl -v -o "csi-${VERSION}.proto" https://raw.githubusercontent.com/container-storage-interface/spec/${VERSION}/csi.proto

36
docker/kopia-installer.sh Executable file
View File

@ -0,0 +1,36 @@
#!/bin/bash
set -e
set -x
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
PLATFORM=$BUILDPLATFORM
else
PLATFORM=$TARGETPLATFORM
fi
if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64"
fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then
export PLATFORM_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
export PLATFORM_ARCH="arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
export PLATFORM_ARCH="armhf"
else
echo "unsupported/unknown kopia PLATFORM ${PLATFORM}"
exit 0
fi
echo "I am installing kopia $KOPIA_VERSION"
export DEB_FILE="kopia.deb"
wget -O "${DEB_FILE}" "https://github.com/kopia/kopia/releases/download/v${KOPIA_VERSION}/kopia_${KOPIA_VERSION}_linux_${PLATFORM_ARCH}.deb"
dpkg -i "${DEB_FILE}"
rm "${DEB_FILE}"

35
docker/objectivefs-installer.sh Executable file
View File

@ -0,0 +1,35 @@
#!/bin/bash
set -e
set -x
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
PLATFORM=$BUILDPLATFORM
else
PLATFORM=$TARGETPLATFORM
fi
if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64"
fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then
export OBJECTIVEFS_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
export OBJECTIVEFS_ARCH="arm64"
else
echo "unsupported/unknown PLATFORM ${PLATFORM}"
exit 0
fi
export DEB_FILE="objectivefs_${OBJECTIVEFS_VERSION}_${OBJECTIVEFS_ARCH}.deb"
echo "I am installing objectivefs $OBJECTIVEFS_VERSION"
wget "https://objectivefs.com/user/download/${OBJECTIVEFS_DOWNLOAD_ID}/${DEB_FILE}"
dpkg -i "${DEB_FILE}"
rm "${DEB_FILE}"

41
docker/rclone-installer.sh Executable file
View File

@ -0,0 +1,41 @@
#!/bin/bash
# Download and install the rclone binary matching the image platform.
# Expects RCLONE_VERSION in the environment.
set -e
set -x

# "build" selects the build-host platform, anything else the target platform
PLATFORM_TYPE=${1}
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
  PLATFORM=$BUILDPLATFORM
else
  PLATFORM=$TARGETPLATFORM
fi

# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
if [[ "x${PLATFORM}" == "x" ]]; then
  PLATFORM="linux/amd64"
fi

# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
# rclone release assets are named rclone-v<ver>-linux-{amd64,arm64,arm-v7,...}.zip
if [ "$PLATFORM" = "linux/amd64" ]; then
  export PLATFORM_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
  # fixed: was "arm" (the 32-bit build); rclone publishes a native linux-arm64 asset
  export PLATFORM_ARCH="arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
  export PLATFORM_ARCH="arm-v7"
else
  # fixed: message previously said "restic" (copy-paste from restic-installer.sh)
  echo "unsupported/unknown rclone PLATFORM ${PLATFORM}"
  # exit 0 (not 1) so unsupported platforms skip the install without failing the build
  exit 0
fi

echo "I am installing rclone $RCLONE_VERSION"
export ZIP_FILE="rclone.zip"
wget -O "${ZIP_FILE}" "https://github.com/rclone/rclone/releases/download/v${RCLONE_VERSION}/rclone-v${RCLONE_VERSION}-linux-${PLATFORM_ARCH}.zip"
unzip "${ZIP_FILE}"
mv rclone-*-linux-*/rclone /usr/local/bin/rclone
rm -rf rclone-*-linux-*
# clean up the downloaded archive, consistent with the other installer scripts
rm "${ZIP_FILE}"
chown root:root /usr/local/bin/rclone
chmod +x /usr/local/bin/rclone

42
docker/restic-installer.sh Executable file
View File

@ -0,0 +1,42 @@
#!/bin/bash
# Download and install the restic binary matching the image platform.
# Expects RESTIC_VERSION in the environment.
set -e
set -x

# "build" selects the build-host platform, anything else the target platform
PLATFORM_TYPE=${1}
case "${PLATFORM_TYPE}" in
  build) PLATFORM=$BUILDPLATFORM ;;
  *) PLATFORM=$TARGETPLATFORM ;;
esac

# fall back to amd64 when buildx did not supply a platform
if [[ -z "${PLATFORM}" ]]; then
  PLATFORM="linux/amd64"
fi

# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
case "${PLATFORM}" in
  linux/amd64) export PLATFORM_ARCH="amd64" ;;
  linux/arm64) export PLATFORM_ARCH="arm64" ;;
  linux/arm/v7) export PLATFORM_ARCH="arm" ;;
  linux/s390x) export PLATFORM_ARCH="s390x" ;;
  linux/ppc64le) export PLATFORM_ARCH="ppc64le" ;;
  *)
    echo "unsupported/unknown restic PLATFORM ${PLATFORM}"
    # exit 0 (not 1) so unsupported platforms skip the install without failing the build
    exit 0
    ;;
esac

echo "I am installing restic $RESTIC_VERSION"
# release asset is a bzip2-compressed single binary (not actually a tar)
export TAR_FILE="restic.bz2"
wget -O "${TAR_FILE}" "https://github.com/restic/restic/releases/download/v${RESTIC_VERSION}/restic_${RESTIC_VERSION}_linux_${PLATFORM_ARCH}.bz2"
bunzip2 "${TAR_FILE}"
mv restic /usr/local/bin
chown root:root /usr/local/bin/restic
chmod +x /usr/local/bin/restic

3
docker/simple-file-writer Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Write the first argument verbatim to the file named by the second argument.
# Usage: simple-file-writer <content> <path>
# Quotes added: unquoted ${1}/${2} were subject to word splitting and glob
# expansion, mangling content containing spaces or wildcard characters.
echo "${1}" > "${2}"

View File

@ -0,0 +1,6 @@
# common options for the controller service
csi:
# manual override of the available access modes for the deployment
# generally highly unnecessary to alter so only use in advanced scenarios
#access_modes: []

View File

@ -33,7 +33,7 @@ zfs:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
# total volume name (zvol/<datasetParentName>/<pvc name>) length cannot exceed 63 chars
# https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
# standard volume naming overhead is 46 chars
@ -41,7 +41,7 @@ zfs:
# for work-arounds see https://github.com/democratic-csi/democratic-csi/issues/54
datasetParentName: tank/k8s/b/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
# "" (inherit), lz4, gzip-9, etc
@ -68,6 +68,8 @@ iscsi:
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
# https://github.com/democratic-csi/democratic-csi/issues/302
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
- targetGroupPortalGroup: 1
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1

View File

@ -43,14 +43,14 @@ zfs:
# "org.freenas:description": "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}/{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# "org.freenas:test": "{{ parameters.foo }}"
# "org.freenas:test2": "some value"
# total volume name (zvol/<datasetParentName>/<pvc name>) length cannot exceed 63 chars
# https://www.ixsystems.com/documentation/freenas/11.2-U5/storage.html#zfs-zvol-config-opts-tab
# standard volume naming overhead is 46 chars
# datasetParentName should therefore be 17 chars or less when using TrueNAS 12 or below
datasetParentName: tank/k8s/b/vols
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
# they may be siblings, but neither should be nested in the other
# they may be siblings, but neither should be nested in the other
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
# "" (inherit), lz4, gzip-9, etc
@ -77,6 +77,8 @@ iscsi:
# add as many as needed
targetGroups:
# get the correct ID from the "portal" section in the UI
# https://github.com/democratic-csi/democratic-csi/issues/302
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
- targetGroupPortalGroup: 1
# get the correct ID from the "initiators" section in the UI
targetGroupInitiatorGroup: 1

View File

@ -3,8 +3,57 @@ instance_id:
local-hostpath:
# generally shareBasePath and controllerBasePath should be the same for this
# driver, this path should be mounted into the csi-driver container
shareBasePath: "/var/lib/csi-local-hostpath"
shareBasePath: "/var/lib/csi-local-hostpath"
controllerBasePath: "/var/lib/csi-local-hostpath"
dirPermissionsMode: "0777"
dirPermissionsUser: 0
dirPermissionsGroup: 0
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# of local-hostpath the node name will be appended
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# of local-hostpath the node name will be appended
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -9,3 +9,50 @@ lustre:
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# backup hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -8,3 +8,50 @@ nfs:
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -0,0 +1,51 @@
---
apiVersion: v1
kind: Secret
metadata:
name: objectivefs-secret
namespace: kube-system
stringData:
# these can be defined here OR in volumeAttributes
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
"env.OBJECTSTORE": ""
"env.ACCESS_KEY": ""
"env.SECRET_KEY": ""
"env.OBJECTIVEFS_PASSPHRASE": ""
# does NOT need admin key appended for node-manual operations
"env.OBJECTIVEFS_LICENSE": ""
"env.ENDPOINT": ""
# ...
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: objectivefs-manual
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Retain
mountOptions:
[]
# https://objectivefs.com/userguide#mount
#- nodiratime
#- noatime
#- fsavail=<size>
csi:
driver: org.democratic-csi.node-manual
readOnly: false
fsType: objectivefs
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
nodeStageSecretRef:
name: objectivefs-secret
namespace: kube-system
volumeAttributes:
node_attach_driver: objectivefs
provisioner_driver: node-manual
filesystem: "ofs/test"
# these can be defined here OR in the secret referenced above
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
#"env.OBJECTSTORE": "minio://"
#"env.ACCESS_KEY": ""
# ...

32
examples/objectivefs.yaml Normal file
View File

@ -0,0 +1,32 @@
driver: objectivefs
objectivefs:
# note, ALL provisioned filesystems will be created in this pool / bucket
# with the same passphrase entered below
#
# in general this pool should be considered as fully managed by democratic-csi
# so a dedicated pool per-cluster / deployment would be best practice
#
pool: ofscsi
cli:
sudoEnabled: false
env:
# NOTE: this must be the license key + admin key
# admin key feature must be activated on your account
# https://objectivefs.com/howto/objectivefs-admin-key-setup
OBJECTIVEFS_LICENSE:
OBJECTSTORE:
ENDPOINT:
SECRET_KEY:
ACCESS_KEY:
# do NOT change this once it has been set and deployed
OBJECTIVEFS_PASSPHRASE:
# ...
_private:
csi:
volume:
idHash:
# due to 63 char limit on objectivefs fs name, we should
# hash volume names to prevent fs names which are too long
# can be 1 of md5, crc8, crc16, crc32
strategy: crc32

View File

@ -14,6 +14,7 @@ _private:
#driver: kubernetes
# THIS IS UNSUPPORTED, BAD THINGS WILL HAPPEN IF NOT CONFIGURED PROPERLY
# https://github.com/democratic-csi/democratic-csi/issues/289
#
# note the volume length must *always* be the same for every call for the same volume by the CO
# the length must NOT exceed 128 characters
@ -21,6 +22,16 @@ _private:
# must only contain alphanumeric characters or `-` or `_`
idTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
# THIS IS UNSUPPORTED, BAD THINGS WILL HAPPEN IF NOT CONFIGURED PROPERLY
# https://github.com/democratic-csi/democratic-csi/issues/289
#
# in order for this to behave sanely you *MUST* set consistent templates for
# share names/assets (ie: nfs/iscsi/etc) and the `idTemplate` above
#
# setting to retain results in noop delete operations (both shares where applicable and volumes remain intact)
# delete|retain
deleteStrategy: retain
# if set, this hash is applied *after* the templating above
idHash:
strategy: crc16

View File

@ -8,3 +8,50 @@ smb:
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: wheel
snapshots:
# can create multiple snapshot classes each with a parameters.driver value which
# overrides the default, a single install can use all 3 simultaneously if desired
#
# available options:
# - filecopy = rsync/cp
# - restic
# - kopia
#
default_driver: filecopy
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
restic:
global_flags: []
# - --insecure-tls
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
# host will be set to csi driver name
tags: []
# - foobar
# - baz=bar
# automatically prune when a snapshot is deleted
prune: true
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
env: {}
# RESTIC_PASSWORD
# RESTIC_REPOSITORY
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
# snapshot hostname will be set to the csiDriver.name value, in the case
# it is assumed that the repo has been created beforehand
kopia:
# kopia repository status -t -s
config_token:
global_flags: []
# <key>:<value>
tags: []
# - "foobar:true"
env: {}

View File

@ -70,7 +70,11 @@ iscsi:
#password: "bar"
# mutual CHAP
#mutual_userid: "baz"
#mutual_password: "bar"
#mutual_password: "bar"
block:
attributes:
# set to 1 to enable Thin Provisioning Unmap
emulate_tpu: 0
targetPortal: "server[:port]"
# for multipath
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]

View File

@ -20,7 +20,7 @@ zfs:
# zpool: /usr/local/sbin/zpool
# sudo: /usr/local/bin/sudo
# chroot: /usr/sbin/chroot
# can be used to set arbitrary values on the dataset/zvol
# can use handlebars templates with the parameters from the storage class/CO
#datasetProperties:
@ -65,6 +65,7 @@ nvmeof:
# http://git.infradead.org/users/hch/nvmetcli.git
shareStrategyNvmetCli:
#sudoEnabled: true
# /root/.local/bin/nvmetcli
#nvmetcliPath: nvmetcli
# prevent startup race conditions by ensuring the config on disk has been imported
# before we start messing with things
@ -73,7 +74,7 @@ nvmeof:
basename: "nqn.2003-01.org.linux-nvme"
# add more ports here as appropriate if you have multipath
ports:
- "1"
- "1"
subsystem:
attributes:
allow_any_host: 1
@ -96,7 +97,7 @@ nvmeof:
attributes:
allow_any_host: "true"
listeners:
- trtype: tcp
traddr: server
trsvcid: port
adrfam: ipv4
- trtype: tcp
traddr: server
trsvcid: port
adrfam: ipv4

259
package-lock.json generated
View File

@ -1,12 +1,12 @@
{
"name": "democratic-csi",
"version": "1.8.4",
"version": "1.9.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "democratic-csi",
"version": "1.8.4",
"version": "1.9.0",
"license": "MIT",
"dependencies": {
"@grpc/grpc-js": "^1.8.4",
@ -85,9 +85,9 @@
}
},
"node_modules/@eslint/eslintrc": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.3.tgz",
"integrity": "sha512-yZzuIG+jnVu6hNSzFEN07e8BxF3uAzYtQb6uDkaYZLo6oYZDCq454c5kB8zxnzfCYyP4MIuyBn10L0DqwujTmA==",
"version": "2.1.4",
"resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz",
"integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==",
"dev": true,
"dependencies": {
"ajv": "^6.12.4",
@ -108,24 +108,24 @@
}
},
"node_modules/@eslint/js": {
"version": "8.53.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.53.0.tgz",
"integrity": "sha512-Kn7K8dx/5U6+cT1yEhpX1w4PCSg0M+XyRILPgvwcEBjerFWCwQj5sbr3/VmxqV0JGHCBCzyd6LxypEuehypY1w==",
"version": "8.57.0",
"resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz",
"integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==",
"dev": true,
"engines": {
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
}
},
"node_modules/@grpc/grpc-js": {
"version": "1.9.9",
"resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.9.9.tgz",
"integrity": "sha512-vQ1qwi/Kiyprt+uhb1+rHMpyk4CVRMTGNUGGPRGS7pLNfWkdCHrGEnT6T3/JyC2VZgoOX/X1KwdoU0WYQAeYcQ==",
"version": "1.10.3",
"resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.10.3.tgz",
"integrity": "sha512-qiO9MNgYnwbvZ8MK0YLWbnGrNX3zTcj6/Ef7UHu5ZofER3e2nF3Y35GaPo9qNJJ/UJQKa4KL+z/F4Q8Q+uCdUQ==",
"dependencies": {
"@grpc/proto-loader": "^0.7.8",
"@types/node": ">=12.12.47"
"@grpc/proto-loader": "^0.7.10",
"@js-sdsl/ordered-map": "^4.4.2"
},
"engines": {
"node": "^8.13.0 || >=10.10.0"
"node": ">=12.10.0"
}
},
"node_modules/@grpc/proto-loader": {
@ -146,13 +146,13 @@
}
},
"node_modules/@humanwhocodes/config-array": {
"version": "0.11.13",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.13.tgz",
"integrity": "sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==",
"version": "0.11.14",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
"integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==",
"dev": true,
"dependencies": {
"@humanwhocodes/object-schema": "^2.0.1",
"debug": "^4.1.1",
"@humanwhocodes/object-schema": "^2.0.2",
"debug": "^4.3.1",
"minimatch": "^3.0.5"
},
"engines": {
@ -173,11 +173,20 @@
}
},
"node_modules/@humanwhocodes/object-schema": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.1.tgz",
"integrity": "sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==",
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.2.tgz",
"integrity": "sha512-6EwiSjwWYP7pTckG6I5eyFANjPhmPjUX9JRLUSfNPC7FX7zK9gyZAfUEaECL6ALTpGX5AjnBq3C9XmVWPitNpw==",
"dev": true
},
"node_modules/@js-sdsl/ordered-map": {
"version": "4.4.2",
"resolved": "https://registry.npmjs.org/@js-sdsl/ordered-map/-/ordered-map-4.4.2.tgz",
"integrity": "sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==",
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/js-sdsl"
}
},
"node_modules/@kubernetes/client-node": {
"version": "0.18.1",
"resolved": "https://registry.npmjs.org/@kubernetes/client-node/-/client-node-0.18.1.tgz",
@ -204,14 +213,6 @@
"openid-client": "^5.3.0"
}
},
"node_modules/@kubernetes/client-node/node_modules/@types/node": {
"version": "18.18.9",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.9.tgz",
"integrity": "sha512-0f5klcuImLnG4Qreu9hPj/rEfFq6YRc5n2mAjSsH+ec/mJL+3voBH0+8T7o8RpFjH7ovc+TRsL/c7OYIQsPTfQ==",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@ -312,9 +313,9 @@
"integrity": "sha512-k4MGaQl5TGo/iipqb2UDG2UwjXziSWkh0uysQelTlJpX1qGlpUZYm8PnO4DxG1qBomtJUdYJ6qR6xdIah10JLg=="
},
"node_modules/@types/node": {
"version": "20.9.0",
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.9.0.tgz",
"integrity": "sha512-nekiGu2NDb1BcVofVcEKMIwzlx4NjHlcjhoxxKBNLtz15Y1z7MYf549DFvkHSId02Ax6kGwWntIBPC3l/JZcmw==",
"version": "18.19.26",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.26.tgz",
"integrity": "sha512-+wiMJsIwLOYCvUqSdKTrfkS8mpTp+MPINe6+Np4TAGFWWRWiBQ5kSq9nZGCSPkzx9mvT+uEukzpX4MOSCydcvw==",
"dependencies": {
"undici-types": "~5.26.4"
}
@ -341,9 +342,9 @@
"integrity": "sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw=="
},
"node_modules/@types/ws": {
"version": "8.5.9",
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.9.tgz",
"integrity": "sha512-jbdrY0a8lxfdTp/+r7Z4CkycbOFN8WX+IOchLJr3juT/xzbJ8URyTVSJ/hvNdadTgM1mnedb47n+Y31GsFnQlg==",
"version": "8.5.10",
"resolved": "https://registry.npmjs.org/@types/ws/-/ws-8.5.10.tgz",
"integrity": "sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==",
"dependencies": {
"@types/node": "*"
}
@ -355,9 +356,9 @@
"dev": true
},
"node_modules/acorn": {
"version": "8.11.2",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz",
"integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==",
"version": "8.11.3",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
"integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==",
"dev": true,
"bin": {
"acorn": "bin/acorn"
@ -439,9 +440,9 @@
"integrity": "sha512-spZRyzKL5l5BZQrr/6m/SqFdBN0q3OCI0f9rjfBzCMBIP4p75P620rR3gTmaksNOhmzgdxcaxdNfMy6anrbM0g=="
},
"node_modules/async-mutex": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.4.0.tgz",
"integrity": "sha512-eJFZ1YhRR8UN8eBLoNzcDPcy/jqjsg6I1AP+KvWQX80BqOSW1oJPJXDylPUEeMr2ZQvHgnQ//Lp6f3RQ1zI7HA==",
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/async-mutex/-/async-mutex-0.4.1.tgz",
"integrity": "sha512-WfoBo4E/TbCX1G95XTjbWTE3X2XLG0m1Xbv2cwOtuPdyH9CZvnaA5nCt1ucjaKEgW2A5IF71hxrRhr83Je5xjA==",
"dependencies": {
"tslib": "^2.4.0"
}
@ -465,11 +466,11 @@
"integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg=="
},
"node_modules/axios": {
"version": "1.6.1",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.1.tgz",
"integrity": "sha512-vfBmhDpKafglh0EldBEbVuoe7DyAavGSLWhuSm5ZSEKQnHhBf0xAAwybbNH1IkrJNGnS/VG4I5yxig1pCEXE4g==",
"version": "1.6.8",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.6.8.tgz",
"integrity": "sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ==",
"dependencies": {
"follow-redirects": "^1.15.0",
"follow-redirects": "^1.15.6",
"form-data": "^4.0.0",
"proxy-from-env": "^1.1.0"
}
@ -490,7 +491,8 @@
"node_modules/balanced-match": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz",
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw=="
"integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
"devOptional": true
},
"node_modules/bcrypt-pbkdf": {
"version": "1.0.2",
@ -504,6 +506,7 @@
"version": "1.1.11",
"resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz",
"integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==",
"devOptional": true,
"dependencies": {
"balanced-match": "^1.0.0",
"concat-map": "0.0.1"
@ -672,7 +675,8 @@
"node_modules/concat-map": {
"version": "0.0.1",
"resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg=="
"integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==",
"devOptional": true
},
"node_modules/core-util-is": {
"version": "1.0.2",
@ -818,9 +822,9 @@
"integrity": "sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ=="
},
"node_modules/escalade": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz",
"integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==",
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz",
"integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==",
"engines": {
"node": ">=6"
}
@ -838,16 +842,16 @@
}
},
"node_modules/eslint": {
"version": "8.53.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-8.53.0.tgz",
"integrity": "sha512-N4VuiPjXDUa4xVeV/GC/RV3hQW9Nw+Y463lkWaKKXKYMvmRiRDAtfpuPFLN+E1/6ZhyR8J2ig+eVREnYgUsiag==",
"version": "8.57.0",
"resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz",
"integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==",
"dev": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.2.0",
"@eslint-community/regexpp": "^4.6.1",
"@eslint/eslintrc": "^2.1.3",
"@eslint/js": "8.53.0",
"@humanwhocodes/config-array": "^0.11.13",
"@eslint/eslintrc": "^2.1.4",
"@eslint/js": "8.57.0",
"@humanwhocodes/config-array": "^0.11.14",
"@humanwhocodes/module-importer": "^1.0.1",
"@nodelib/fs.walk": "^1.2.8",
"@ungap/structured-clone": "^1.2.0",
@ -1017,9 +1021,9 @@
"dev": true
},
"node_modules/fastq": {
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.15.0.tgz",
"integrity": "sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==",
"version": "1.17.1",
"resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz",
"integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==",
"dev": true,
"dependencies": {
"reusify": "^1.0.4"
@ -1059,9 +1063,9 @@
}
},
"node_modules/flat-cache": {
"version": "3.1.1",
"resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.1.1.tgz",
"integrity": "sha512-/qM2b3LUIaIgviBQovTLvijfyOQXPtSRnRK26ksj2J7rzPIecePUIpJsZ4T02Qg+xiAEKIs5K8dsHEd+VaKa/Q==",
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz",
"integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==",
"dev": true,
"dependencies": {
"flatted": "^3.2.9",
@ -1069,13 +1073,13 @@
"rimraf": "^3.0.2"
},
"engines": {
"node": ">=12.0.0"
"node": "^10.12.0 || >=12.0.0"
}
},
"node_modules/flatted": {
"version": "3.2.9",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.2.9.tgz",
"integrity": "sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==",
"version": "3.3.1",
"resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz",
"integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==",
"dev": true
},
"node_modules/fn.name": {
@ -1084,9 +1088,9 @@
"integrity": "sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw=="
},
"node_modules/follow-redirects": {
"version": "1.15.3",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz",
"integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==",
"version": "1.15.6",
"resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz",
"integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==",
"funding": [
{
"type": "individual",
@ -1124,9 +1128,9 @@
}
},
"node_modules/fs-extra": {
"version": "11.1.1",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.1.1.tgz",
"integrity": "sha512-MGIE4HOvQCeUCzmlHs0vXpih4ysz4wg9qiSAu6cd42lVwPbTM1TjV7RusoyQqMmk/95gdQZX72u+YW+c3eEpFQ==",
"version": "11.2.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-11.2.0.tgz",
"integrity": "sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw==",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^6.0.1",
@ -1161,7 +1165,8 @@
"node_modules/fs.realpath": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz",
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw=="
"integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==",
"dev": true
},
"node_modules/get-caller-file": {
"version": "2.0.5",
@ -1183,6 +1188,7 @@
"version": "7.2.3",
"resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz",
"integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==",
"dev": true,
"dependencies": {
"fs.realpath": "^1.0.0",
"inflight": "^1.0.4",
@ -1211,9 +1217,9 @@
}
},
"node_modules/globals": {
"version": "13.23.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz",
"integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==",
"version": "13.24.0",
"resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz",
"integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==",
"dev": true,
"dependencies": {
"type-fest": "^0.20.2"
@ -1301,9 +1307,9 @@
}
},
"node_modules/ignore": {
"version": "5.2.4",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz",
"integrity": "sha512-MAb38BcSbH0eHNBxn7ql2NH/kX33OkB3lZ1BNdh7ENeRChHTYsTvWrMubiIAMNS2llXEEgZ1MUOBtXChP3kaFQ==",
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz",
"integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==",
"dev": true,
"engines": {
"node": ">= 4"
@ -1338,6 +1344,7 @@
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
"integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==",
"devOptional": true,
"dependencies": {
"once": "^1.3.0",
"wrappy": "1"
@ -1427,9 +1434,9 @@
"integrity": "sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g=="
},
"node_modules/jose": {
"version": "4.15.4",
"resolved": "https://registry.npmjs.org/jose/-/jose-4.15.4.tgz",
"integrity": "sha512-W+oqK4H+r5sITxfxpSU+MMdr/YSWGvgZMQDIsNoBDGGy4i7GBPTtvFKibQzW06n3U3TqHjhvBJsirShsEJ6eeQ==",
"version": "4.15.5",
"resolved": "https://registry.npmjs.org/jose/-/jose-4.15.5.tgz",
"integrity": "sha512-jc7BFxgKPKi94uOvEmzlSWFFe2+vASyXaKUpdQKatWAESU2MWjDfFf0fdfc83CDKcA5QecabZeNLyfhe3yKNkg==",
"optional": true,
"funding": {
"url": "https://github.com/sponsors/panva"
@ -1629,6 +1636,7 @@
"version": "3.1.2",
"resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
"integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
"devOptional": true,
"dependencies": {
"brace-expansion": "^1.1.7"
},
@ -1688,9 +1696,9 @@
}
},
"node_modules/moment": {
"version": "2.29.4",
"resolved": "https://registry.npmjs.org/moment/-/moment-2.29.4.tgz",
"integrity": "sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==",
"version": "2.30.1",
"resolved": "https://registry.npmjs.org/moment/-/moment-2.30.1.tgz",
"integrity": "sha512-uEmtNhbDOrWPFS+hdjFCBfy9f2YoyzRpwcl+DqpC6taX21FzsTLQVbMV/W7PzNSX6x/bhC1zA3c2UQ5NzH6how==",
"optional": true,
"engines": {
"node": "*"
@ -1749,9 +1757,9 @@
}
},
"node_modules/nan": {
"version": "2.18.0",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.18.0.tgz",
"integrity": "sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w==",
"version": "2.19.0",
"resolved": "https://registry.npmjs.org/nan/-/nan-2.19.0.tgz",
"integrity": "sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw==",
"optional": true
},
"node_modules/natural-compare": {
@ -1804,6 +1812,7 @@
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"devOptional": true,
"dependencies": {
"wrappy": "1"
}
@ -1817,12 +1826,12 @@
}
},
"node_modules/openid-client": {
"version": "5.6.1",
"resolved": "https://registry.npmjs.org/openid-client/-/openid-client-5.6.1.tgz",
"integrity": "sha512-PtrWsY+dXg6y8mtMPyL/namZSYVz8pjXz3yJiBNZsEdCnu9miHLB4ELVC85WvneMKo2Rg62Ay7NkuCpM0bgiLQ==",
"version": "5.6.5",
"resolved": "https://registry.npmjs.org/openid-client/-/openid-client-5.6.5.tgz",
"integrity": "sha512-5P4qO9nGJzB5PI0LFlhj4Dzg3m4odt0qsJTfyEtZyOlkgpILwEioOhVVJOrS1iVH494S4Ee5OCjjg6Bf5WOj3w==",
"optional": true,
"dependencies": {
"jose": "^4.15.1",
"jose": "^4.15.5",
"lru-cache": "^6.0.0",
"object-hash": "^2.2.0",
"oidc-token-hash": "^5.0.3"
@ -1915,6 +1924,7 @@
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz",
"integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==",
"devOptional": true,
"engines": {
"node": ">=0.10.0"
}
@ -1982,9 +1992,9 @@
}
},
"node_modules/protobufjs": {
"version": "7.2.5",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.5.tgz",
"integrity": "sha512-gGXRSXvxQ7UiPgfw8gevrfRWcTlSbOFg+p/N+JVJEK5VhueL2miT6qTymqAmjr1Q5WbOCyJbyrk6JfWKwlFn6A==",
"version": "7.2.6",
"resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.6.tgz",
"integrity": "sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw==",
"hasInstallScript": true,
"dependencies": {
"@protobufjs/aspromise": "^1.1.2",
@ -2171,6 +2181,7 @@
"version": "3.0.2",
"resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz",
"integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==",
"dev": true,
"dependencies": {
"glob": "^7.1.3"
},
@ -2243,9 +2254,9 @@
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg=="
},
"node_modules/semver": {
"version": "7.5.4",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz",
"integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==",
"version": "7.6.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.6.0.tgz",
"integrity": "sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg==",
"dependencies": {
"lru-cache": "^6.0.0"
},
@ -2305,9 +2316,9 @@
}
},
"node_modules/ssh2": {
"version": "1.14.0",
"resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.14.0.tgz",
"integrity": "sha512-AqzD1UCqit8tbOKoj6ztDDi1ffJZ2rV2SwlgrVVrHPkV5vWqGJOVp5pmtj18PunkPJAuKQsnInyKV+/Nb2bUnA==",
"version": "1.15.0",
"resolved": "https://registry.npmjs.org/ssh2/-/ssh2-1.15.0.tgz",
"integrity": "sha512-C0PHgX4h6lBxYx7hcXwu3QWdh4tg6tZZsTfXcdvc5caW/EMxaB4H9dWsl7qk+F7LAW762hp8VbXOX7x4xUYvEw==",
"hasInstallScript": true,
"dependencies": {
"asn1": "^0.2.6",
@ -2317,8 +2328,8 @@
"node": ">=10.16.0"
},
"optionalDependencies": {
"cpu-features": "~0.0.8",
"nan": "^2.17.0"
"cpu-features": "~0.0.9",
"nan": "^2.18.0"
}
},
"node_modules/sshpk": {
@ -2418,9 +2429,9 @@
}
},
"node_modules/tar": {
"version": "6.2.0",
"resolved": "https://registry.npmjs.org/tar/-/tar-6.2.0.tgz",
"integrity": "sha512-/Wo7DcT0u5HUV486xg675HtjNd3BXZ6xDbzsCUZPt5iw8bTQ63bP0Raut3mvro9u+CUyq7YQd8Cx55fsZXxqLQ==",
"version": "6.2.1",
"resolved": "https://registry.npmjs.org/tar/-/tar-6.2.1.tgz",
"integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==",
"dependencies": {
"chownr": "^2.0.0",
"fs-minipass": "^2.0.0",
@ -2456,14 +2467,11 @@
"dev": true
},
"node_modules/tmp": {
"version": "0.2.1",
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz",
"integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==",
"dependencies": {
"rimraf": "^3.0.0"
},
"version": "0.2.3",
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz",
"integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==",
"engines": {
"node": ">=8.17.0"
"node": ">=14.14"
}
},
"node_modules/tmp-promise": {
@ -2623,9 +2631,9 @@
}
},
"node_modules/winston": {
"version": "3.11.0",
"resolved": "https://registry.npmjs.org/winston/-/winston-3.11.0.tgz",
"integrity": "sha512-L3yR6/MzZAOl0DsysUXHVjOwv8mKZ71TrA/41EIduGpOOV5LQVodqN+QdQ6BS6PJ/RdIshZhq84P/fStEZkk7g==",
"version": "3.13.0",
"resolved": "https://registry.npmjs.org/winston/-/winston-3.13.0.tgz",
"integrity": "sha512-rwidmA1w3SE4j0E5MuIufFhyJPBDG7Nu71RkZor1p2+qHvJSZ9GYDA81AyleQcZbh/+V6HjeBdfnTZJm9rSeQQ==",
"dependencies": {
"@colors/colors": "^1.6.0",
"@dabh/diagnostics": "^2.0.2",
@ -2637,16 +2645,16 @@
"safe-stable-stringify": "^2.3.1",
"stack-trace": "0.0.x",
"triple-beam": "^1.3.0",
"winston-transport": "^4.5.0"
"winston-transport": "^4.7.0"
},
"engines": {
"node": ">= 12.0.0"
}
},
"node_modules/winston-transport": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.6.0.tgz",
"integrity": "sha512-wbBA9PbPAHxKiygo7ub7BYRiKxms0tpfU2ljtWzb3SjRjv5yl6Ozuy/TkXf00HTAt+Uylo3gSkNwzc4ME0wiIg==",
"version": "4.7.0",
"resolved": "https://registry.npmjs.org/winston-transport/-/winston-transport-4.7.0.tgz",
"integrity": "sha512-ajBj65K5I7denzer2IYW6+2bNIVqLGDHqDw3Ow8Ohh+vdW+rv4MZ6eiDvHoKhfJFZ2auyN8byXieDDJ96ViONg==",
"dependencies": {
"logform": "^2.3.2",
"readable-stream": "^3.6.0",
@ -2688,12 +2696,13 @@
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==",
"devOptional": true
},
"node_modules/ws": {
"version": "8.14.2",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.14.2.tgz",
"integrity": "sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==",
"version": "8.16.0",
"resolved": "https://registry.npmjs.org/ws/-/ws-8.16.0.tgz",
"integrity": "sha512-HS0c//TP7Ina87TfiPUz1rQzMhHrl/SG2guqRcTOIUYD2q8uhUdNHZYJUaQ8aTGPzCh+c6oawMKW35nFl1dxyQ==",
"engines": {
"node": ">=10.0.0"
},

View File

@ -1,6 +1,6 @@
{
"name": "democratic-csi",
"version": "1.8.4",
"version": "1.9.0",
"description": "kubernetes csi driver framework",
"main": "bin/democratic-csi",
"scripts": {

View File

@ -4,9 +4,20 @@ const { GrpcError, grpc } = require("../../utils/grpc");
const cp = require("child_process");
const fs = require("fs");
const fse = require("fs-extra");
const Kopia = require("../../utils/kopia").Kopia;
const os = require("os");
const path = require("path");
const registry = require("../../utils/registry");
const Restic = require("../../utils/restic").Restic;
const semver = require("semver");
const __REGISTRY_NS__ = "ControllerClientCommonDriver";
// https://forum.restic.net/t/how-to-prevent-two-restic-tasks-concurrently/6859/5
const SNAPSHOTS_CUT_IN_FLIGHT = new Set();
const SNAPSHOTS_RESTORE_IN_FLIGHT = new Set();
const DEFAULT_SNAPSHOT_DRIVER = "filecopy";
/**
* Crude nfs-client driver which simply creates directories to be mounted
* and uses rsync for cloning/snapshots
@ -102,6 +113,21 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
if (this.ctx.args.csiMode.includes("controller")) {
setInterval(() => {
this.ctx.logger.info("snapshots cut in flight", {
names: [...SNAPSHOTS_CUT_IN_FLIGHT],
count: SNAPSHOTS_CUT_IN_FLIGHT.size,
});
}, 30 * 1000);
setInterval(() => {
this.ctx.logger.info("snapshots restore in flight", {
names: [...SNAPSHOTS_RESTORE_IN_FLIGHT],
count: SNAPSHOTS_RESTORE_IN_FLIGHT.size,
});
}, 30 * 1000);
}
}
getAccessModes(capability) {
@ -429,6 +455,90 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
return p.replaceAll(path.posix.sep, path.win32.sep);
}
async getResticClient() {
const driver = this;
return registry.get(`${__REGISTRY_NS__}:restic`, () => {
const config_key = driver.getConfigKey();
const restic_env = _.get(
driver.options[config_key],
"snapshots.restic.env",
{}
);
const restic_global_flags = _.get(
driver.options[config_key],
"snapshots.restic.global_flags",
[]
);
const client = new Restic({
env: restic_env,
logger: driver.ctx.logger,
global_flags: restic_global_flags,
});
let hostname = driver.ctx.args.csiName;
if (driver.options.driver == "local-hostpath") {
let nodename = process.env.CSI_NODE_ID || os.hostname();
hostname = `${hostname}-${nodename}`;
}
return client;
});
}
async getKopiaClient() {
const driver = this;
return registry.getAsync(`${__REGISTRY_NS__}:kopia`, async () => {
const config_key = driver.getConfigKey();
const kopia_env = _.get(
driver.options[config_key],
"snapshots.kopia.env",
{}
);
const kopia_global_flags = _.get(
driver.options[config_key],
"snapshots.kopia.global_flags",
[]
);
const client = new Kopia({
env: kopia_env,
logger: driver.ctx.logger,
global_flags: kopia_global_flags,
});
let hostname = driver.ctx.args.csiName;
if (driver.options.driver == "local-hostpath") {
let nodename = process.env.CSI_NODE_ID || os.hostname();
hostname = `${hostname}-${nodename}`;
}
let username = "democratic-csi";
await client.repositoryConnect([
"--override-hostname",
hostname,
"--override-username",
username,
"from-config",
"--token",
_.get(driver.options[config_key], "snapshots.kopia.config_token", ""),
]);
//let repositoryStatus = await client.repositoryStatus();
//console.log(repositoryStatus);
client.hostname = hostname;
client.username = username;
return client;
});
}
/**
* Create a volume doing in essence the following:
* 1. create directory
@ -442,9 +552,10 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
async CreateVolume(call) {
const driver = this;
let config_key = this.getConfigKey();
let volume_id = await driver.getVolumeIdFromCall(call);
let volume_content_source = call.request.volume_content_source;
const config_key = driver.getConfigKey();
const volume_id = await driver.getVolumeIdFromCall(call);
const volume_content_source = call.request.volume_content_source;
const instance_id = driver.options.instance_id;
if (
call.request.volume_capabilities &&
@ -518,13 +629,117 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
// create dataset
if (volume_content_source) {
let snapshot_driver;
let snapshot_id;
if (volume_content_source.type == "snapshot") {
snapshot_id = volume_content_source.snapshot.snapshot_id;
// get parsed variant of driver to allow snapshotter to work with all
// drivers simultaneously
const parsed_snapshot_id = new URLSearchParams(snapshot_id);
if (parsed_snapshot_id.get("snapshot_driver")) {
snapshot_id = parsed_snapshot_id.get("snapshot_id");
snapshot_driver = parsed_snapshot_id.get("snapshot_driver");
} else {
snapshot_driver = "filecopy";
}
}
switch (volume_content_source.type) {
// must be available when advertising CREATE_DELETE_SNAPSHOT
// simply clone
case "snapshot":
source_path = driver.getControllerSnapshotPath(
volume_content_source.snapshot.snapshot_id
);
switch (snapshot_driver) {
case "filecopy":
{
source_path = driver.getControllerSnapshotPath(snapshot_id);
if (!(await driver.directoryExists(source_path))) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_content_source path: ${source_path}`
);
}
driver.ctx.logger.debug(
"controller volume source path: %s",
source_path
);
await driver.cloneDir(source_path, volume_path);
}
break;
case "restic":
{
const restic = await driver.getResticClient();
let options = [];
await restic.init();
// find snapshot
options = [snapshot_id];
const snapshots = await restic.snapshots(options);
if (!snapshots.length > 0) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid restic snapshot volume_content_source: ${snapshot_id}`
);
}
const snapshot = snapshots[snapshots.length - 1];
// restore snapshot
// --verify?
options = [
`${snapshot.id}:${snapshot.paths[0]}`,
"--target",
volume_path,
"--sparse",
"--host",
restic.hostname,
];
// technically same snapshot could be getting restored to multiple volumes simultaneously
// ensure we add target path as part of the key
SNAPSHOTS_RESTORE_IN_FLIGHT.add(
`${snapshot_id}:${volume_path}`
);
await restic.restore(options).finally(() => {
SNAPSHOTS_RESTORE_IN_FLIGHT.delete(
`${snapshot_id}:${volume_path}`
);
});
}
break;
case "kopia":
{
const kopia = await driver.getKopiaClient();
const snapshot = await kopia.snapshotGet(snapshot_id);
if (!snapshot) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid restic snapshot volume_content_source: ${snapshot_id}`
);
}
/**
* --[no-]write-files-atomically
* --[no-]write-sparse-files
*/
let options = [
"--write-sparse-files",
snapshot_id,
volume_path,
];
await kopia.snapshotRestore(options);
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown snapthot driver: ${snapshot_driver}`
);
}
break;
// must be available when advertising CLONE_VOLUME
// create snapshot first, then clone
@ -532,24 +747,26 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
source_path = driver.getControllerVolumePath(
volume_content_source.volume.volume_id
);
if (!(await driver.directoryExists(source_path))) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_content_source path: ${source_path}`
);
}
driver.ctx.logger.debug(
"controller volume source path: %s",
source_path
);
await driver.cloneDir(source_path, volume_path);
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`invalid volume_content_source type: ${volume_content_source.type}`
);
break;
}
if (!(await driver.directoryExists(source_path))) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_content_source path: ${source_path}`
);
}
driver.ctx.logger.debug("controller source path: %s", source_path);
await driver.cloneDir(source_path, volume_path);
}
// set mode
@ -627,7 +844,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
async DeleteVolume(call) {
const driver = this;
let volume_id = call.request.volume_id;
const volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(
@ -636,6 +853,17 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
const volume_path = driver.getControllerVolumePath(volume_id);
await driver.deleteDir(volume_path);
@ -717,14 +945,49 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
}
/**
* Create snapshot is meant to be a synchronous call to 'cut' the snapshot
* in the case of rsync/restic/kopia/etc tooling a 'cut' can take a very
* long time. It was deemed appropriate to continue to wait vs making the
* call async with `ready_to_use` false.
*
* Restic:
* With restic the idea is to keep the tree scoped to each volume. Each
* new snapshot for the same volume should have a parent of the most recently
* cut snapshot for the same volume. Behind the scenes restic is applying
* dedup logic globally in the repo so efficiency should still be extremely
* efficient.
*
* Kopia:
*
*
* https://github.com/container-storage-interface/spec/blob/master/spec.md#createsnapshot
*
* @param {*} call
*/
async CreateSnapshot(call) {
const driver = this;
const config_key = driver.getConfigKey();
let snapshot_driver = _.get(
driver.options[config_key],
"snapshots.default_driver",
DEFAULT_SNAPSHOT_DRIVER
);
// randomize driver for testing
//if (process.env.CSI_SANITY == "1") {
// call.request.parameters.driver = ["filecopy", "restic", "kopia"].random();
//}
if (call.request.parameters.driver) {
snapshot_driver = call.request.parameters.driver;
}
const instance_id = driver.options.instance_id;
let response;
// both these are required
let source_volume_id = call.request.source_volume_id;
const source_volume_id = call.request.source_volume_id;
let name = call.request.name;
if (!source_volume_id) {
@ -759,17 +1022,262 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
name = name.replace(/[^a-z0-9_\-:.+]+/gi, "");
driver.ctx.logger.verbose("cleansed snapshot name: %s", name);
const snapshot_id = `${source_volume_id}-${name}`;
const volume_path = driver.getControllerVolumePath(source_volume_id);
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
//const volume_path = "/home/thansen/beets/";
//const volume_path = "/var/lib/docker/";
// do NOT overwrite existing snapshot
if (!(await driver.directoryExists(snapshot_path))) {
await driver.cloneDir(volume_path, snapshot_path);
let snapshot_id;
let size_bytes = 0;
let ready_to_use = true;
let snapshot_date = new Date();
switch (snapshot_driver) {
case "filecopy":
{
snapshot_id = `${source_volume_id}-${name}`;
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
const snapshot_dir_exists = await driver.directoryExists(
snapshot_path
);
// do NOT overwrite existing snapshot
if (!snapshot_dir_exists) {
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
await driver.cloneDir(volume_path, snapshot_path).finally(() => {
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
});
driver.ctx.logger.info(
`filecopy backup finished: snapshot_id=${snapshot_id}, path=${volume_path}`
);
} else {
driver.ctx.logger.debug(
`filecopy backup already cut: ${snapshot_id}`
);
}
size_bytes = await driver.getDirectoryUsage(snapshot_path);
}
break;
case "restic":
{
const restic = await driver.getResticClient();
const group_by_options = ["--group-by", "host,paths,tags"];
let snapshot_exists = false;
// --tag specified multiple times is OR logic, comma-separated is AND logic
let base_tag_option = `source=democratic-csi`;
base_tag_option += `,csi_volume_id=${source_volume_id}`;
if (instance_id) {
base_tag_option += `csi_instance_id=${instance_id}`;
}
let options = [];
/**
* ensure repo has been initted
*
* it is expected that at a minimum the following env vars are set
* RESTIC_PASSWORD
* RESTIC_REPOSITORY
*/
options = [];
await restic.init();
// see if snapshot already exist with matching tags, etc
options = [
"--path",
volume_path.replace(/\/$/, ""),
"--host",
restic.hostname,
];
// when searching for existing snapshot include name
response = await restic.snapshots(
options
.concat(group_by_options)
.concat(["--tag", base_tag_option + `,csi_snapshot_name=${name}`])
);
if (response.length > 0) {
snapshot_exists = true;
const snapshot = response[response.length - 1];
driver.ctx.logger.debug(
`restic backup already cut: ${snapshot.id}`
);
const stats = await restic.stats([snapshot.id]);
snapshot_id = snapshot.id;
snapshot_date = new Date(snapshot.time);
size_bytes = stats.total_size;
}
if (!snapshot_exists) {
// --no-scan do not run scanner to estimate size of backup
// -x, --one-file-system exclude other file systems, don't cross filesystem boundaries and subvolumes
options = [
"--host",
restic.hostname,
"--one-file-system",
//"--no-scan",
];
// backup with minimal tags to ensure a sane parent for the volume (since tags are included in group_by)
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
response = await restic
.backup(
volume_path,
options
.concat(group_by_options)
.concat(["--tag", base_tag_option])
)
.finally(() => {
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
});
response.parsed.reverse();
let summary = response.parsed.find((message) => {
return message.message_type == "summary";
});
snapshot_id = summary.snapshot_id;
driver.ctx.logger.info(
`restic backup finished: snapshot_id=${snapshot_id}, path=${volume_path}, total_duration=${
summary.total_duration | 0
}s`
);
const stats = await restic.stats([snapshot_id]);
size_bytes = stats.total_size;
// only apply these tags at creation, do NOT use for search above etc
let add_tags = `csi_snapshot_name=${name}`;
let config_tags = _.get(
driver.options[config_key],
"snapshots.restic.tags",
[]
);
if (config_tags.length > 0) {
add_tags += `,${config_tags.join(",")}`;
}
await restic.tag([
"--path",
volume_path.replace(/\/$/, ""),
"--host",
restic.hostname,
"--add",
add_tags,
snapshot_id,
]);
// this is ugly, the tag operation should output the new id, so we
// must resort to full query of all snapshots for the volume
// find snapshot using `original` id as adding tags creates a new id
options = [
"--path",
volume_path.replace(/\/$/, ""),
"--host",
restic.hostname,
];
response = await restic.snapshots(
options
.concat(group_by_options)
.concat([
"--tag",
`${base_tag_option},csi_snapshot_name=${name}`,
])
);
let original_snapshot_id = snapshot_id;
let snapshot = response.find((snapshot) => {
return snapshot.original == original_snapshot_id;
});
if (!snapshot) {
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to find snapshot post-tag operation: snapshot_id=${original_snapshot_id}`
);
}
snapshot_id = snapshot.id;
driver.ctx.logger.info(
`restic backup successfully applied additional tags: new_snapshot_id=${snapshot_id}, original_snapshot_id=${original_snapshot_id} path=${volume_path}`
);
}
}
break;
case "kopia":
{
const kopia = await driver.getKopiaClient();
let options = [];
let snapshot_exists = false;
// --tags specified multiple times means snapshot must contain ALL supplied tags
let tags = [];
tags.push(`source:democratic-csi`);
tags.push(`csi_volume_id:${source_volume_id}`);
if (instance_id) {
tags.push(`csi_instance_id:${instance_id}`);
}
tags.push(`csi_snapshot_name:${name}`);
options = ["--no-storage-stats", "--no-delta"];
tags.forEach((item) => {
options.push("--tags", item);
});
options.push(
`${kopia.username}@${kopia.hostname}:${volume_path.replace(
/\/$/,
""
)}`
);
response = await kopia.snapshotList(options);
if (response.length > 0) {
snapshot_exists = true;
const snapshot = response[response.length - 1];
driver.ctx.logger.debug(
`kopia snapshot already cut: ${snapshot.id}`
);
snapshot_id = snapshot.id;
snapshot_date = new Date(snapshot.startTime); // maybe use endTime?
size_bytes = snapshot.stats.totalSize;
}
if (!snapshot_exists) {
// create snapshot
options = [];
tags.forEach((item) => {
options.push("--tags", item);
});
options.push(volume_path);
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
response = await kopia.snapshotCreate(options).finally(() => {
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
});
snapshot_id = response.id;
snapshot_date = new Date(response.startTime); // maybe use endTime?
let snapshot_end_date = new Date(response.endTime);
let total_duration =
Math.abs(snapshot_end_date.getTime() - snapshot_date.getTime()) /
1000;
size_bytes = response.rootEntry.summ.size;
driver.ctx.logger.info(
`kopia backup finished: snapshot_id=${snapshot_id}, path=${volume_path}, total_duration=${
total_duration | 0
}s`
);
}
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown snapthot driver: ${snapshot_driver}`
);
}
let size_bytes = await driver.getDirectoryUsage(snapshot_path);
return {
snapshot: {
/**
@ -777,14 +1285,17 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
* is needed to create a volume from this snapshot.
*/
size_bytes,
snapshot_id,
snapshot_id: new URLSearchParams({
snapshot_driver,
snapshot_id,
}).toString(),
source_volume_id: source_volume_id,
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
creation_time: {
seconds: Math.round(new Date().getTime() / 1000),
seconds: Math.round(snapshot_date.getTime() / 1000),
nanos: 0,
},
ready_to_use: true,
ready_to_use,
},
};
}
@ -798,7 +1309,11 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
async DeleteSnapshot(call) {
const driver = this;
const snapshot_id = call.request.snapshot_id;
let snapshot_id = call.request.snapshot_id;
let snapshot_driver;
const config_key = driver.getConfigKey();
const instance_id = driver.options.instance_id;
let response;
if (!snapshot_id) {
throw new GrpcError(
@ -807,8 +1322,70 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
);
}
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.deleteDir(snapshot_path);
// get parsed variant of driver to allow snapshotter to work with all
// drivers simultaneously
const parsed_snapshot_id = new URLSearchParams(snapshot_id);
if (parsed_snapshot_id.get("snapshot_driver")) {
snapshot_id = parsed_snapshot_id.get("snapshot_id");
snapshot_driver = parsed_snapshot_id.get("snapshot_driver");
} else {
snapshot_driver = "filecopy";
}
switch (snapshot_driver) {
case "filecopy":
{
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
await driver.deleteDir(snapshot_path);
}
break;
case "restic":
{
let prune = _.get(
driver.options[config_key],
"snapshots.restic.prune",
false
);
if (typeof prune != "boolean") {
prune = String(prune);
if (["true", "yes", "1"].includes(prune.toLowerCase())) {
prune = true;
} else {
prune = false;
}
}
const restic = await driver.getResticClient();
let options = [];
await restic.init();
// we preempt with this check to prevent locking the repo when snapshot does not exist
const snapshot_exists = await restic.snapshot_exists(snapshot_id);
if (snapshot_exists) {
options = [];
if (prune) {
options.push("--prune");
}
options.push(snapshot_id);
await restic.forget(options);
}
}
break;
case "kopia":
{
const kopia = await driver.getKopiaClient();
let options = [snapshot_id];
await kopia.snapshotDelete(options);
}
break;
default:
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`unknown snapthot driver: ${snapshot_driver}`
);
}
return {};
}

View File

@ -0,0 +1,671 @@
const _ = require("lodash");
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const GeneralUtils = require("../../utils/general");
const { ObjectiveFS } = require("../../utils/objectivefs");
const registry = require("../../utils/registry");
const semver = require("semver");
const uuidv4 = require("uuid").v4;
const __REGISTRY_NS__ = "ControllerZfsLocalDriver";
const MAX_VOLUME_NAME_LENGTH = 63;
class ControllerObjectiveFSDriver extends CsiBaseDriver {
constructor(ctx, options) {
super(...arguments);
options = options || {};
options.service = options.service || {};
options.service.identity = options.service.identity || {};
options.service.controller = options.service.controller || {};
options.service.node = options.service.node || {};
options.service.identity.capabilities =
options.service.identity.capabilities || {};
options.service.controller.capabilities =
options.service.controller.capabilities || {};
options.service.node.capabilities = options.service.node.capabilities || {};
if (!("service" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity service caps");
options.service.identity.capabilities.service = [
//"UNKNOWN",
"CONTROLLER_SERVICE",
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
];
}
if (!("volume_expansion" in options.service.identity.capabilities)) {
this.ctx.logger.debug("setting default identity volume_expansion caps");
options.service.identity.capabilities.volume_expansion = [
//"UNKNOWN",
//"ONLINE",
//"OFFLINE"
];
}
if (!("rpc" in options.service.controller.capabilities)) {
this.ctx.logger.debug("setting default controller caps");
options.service.controller.capabilities.rpc = [
//"UNKNOWN",
"CREATE_DELETE_VOLUME",
//"PUBLISH_UNPUBLISH_VOLUME",
"LIST_VOLUMES",
//"GET_CAPACITY",
//"CREATE_DELETE_SNAPSHOT",
//"LIST_SNAPSHOTS",
//"CLONE_VOLUME",
//"PUBLISH_READONLY",
//"EXPAND_VOLUME",
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
options.service.controller.capabilities.rpc
.push
//"VOLUME_CONDITION",
//"GET_VOLUME"
();
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.controller.capabilities.rpc.push(
"SINGLE_NODE_MULTI_WRITER"
);
}
}
if (!("rpc" in options.service.node.capabilities)) {
this.ctx.logger.debug("setting default node caps");
options.service.node.capabilities.rpc = [
//"UNKNOWN",
"STAGE_UNSTAGE_VOLUME",
"GET_VOLUME_STATS",
//"EXPAND_VOLUME"
];
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
}
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
/**
* This is for volumes that support a mount time gid such as smb or fat
*/
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
}
}
}
async getObjectiveFSClient() {
const driver = this;
return registry.getAsync(
`${__REGISTRY_NS__}:objectivefsclient`,
async () => {
const options = {};
options.sudo = _.get(
driver.options,
"objectivefs.cli.sudoEnabled",
false
);
options.pool = _.get(driver.options, "objectivefs.pool");
return new ObjectiveFS({
...options,
env: _.get(driver.options, "objectivefs.env", {}),
});
}
);
}
/**
*
* @returns Array
*/
getAccessModes(capability) {
let access_modes = _.get(this.options, "csi.access_modes", null);
if (access_modes !== null) {
return access_modes;
}
access_modes = [
"UNKNOWN",
"SINGLE_NODE_WRITER",
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
"SINGLE_NODE_READER_ONLY",
"MULTI_NODE_READER_ONLY",
"MULTI_NODE_SINGLE_WRITER",
"MULTI_NODE_MULTI_WRITER",
];
if (
capability.access_type == "block" &&
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
) {
access_modes.push("MULTI_NODE_MULTI_WRITER");
}
return access_modes;
}
getFsTypes() {
return ["fuse.objectivefs", "objectivefs"];
}
assertCapabilities(capabilities) {
const driver = this;
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
let message = null;
let fs_types = driver.getFsTypes();
const valid = capabilities.every((capability) => {
if (capability.access_type != "mount") {
message = `invalid access_type ${capability.access_type}`;
return false;
}
if (
capability.mount.fs_type &&
!fs_types.includes(capability.mount.fs_type)
) {
message = `invalid fs_type ${capability.mount.fs_type}`;
return false;
}
if (
!this.getAccessModes(capability).includes(capability.access_mode.mode)
) {
message = `invalid access_mode, ${capability.access_mode.mode}`;
return false;
}
return true;
});
return { valid, message };
}
async getVolumeStatus(entry) {
const driver = this;
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
const volume_id = entry.NAME.replace(object_store, "").split("/")[1];
if (!!!semver.satisfies(driver.ctx.csiVersion, ">=1.2.0")) {
return;
}
let abnormal = false;
let message = "OK";
let volume_status = {};
//LIST_VOLUMES_PUBLISHED_NODES
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.2.0") &&
driver.options.service.controller.capabilities.rpc.includes(
"LIST_VOLUMES_PUBLISHED_NODES"
)
) {
// TODO: let drivers fill this in
volume_status.published_node_ids = [];
}
//VOLUME_CONDITION
if (
semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") &&
driver.options.service.controller.capabilities.rpc.includes(
"VOLUME_CONDITION"
)
) {
// TODO: let drivers fill this in
volume_condition = { abnormal, message };
volume_status.volume_condition = volume_condition;
}
return volume_status;
}
async populateCsiVolumeFromData(entry) {
const driver = this;
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
let filesystem = entry.NAME.replace(object_store, "");
let volume_content_source;
let volume_context = {
provisioner_driver: driver.options.driver,
node_attach_driver: "objectivefs",
filesystem,
object_store,
"env.OBJECTSTORE": object_store,
};
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
let accessible_topology;
let volume = {
volume_id: filesystem.split("/")[1],
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
accessible_topology,
};
return volume;
}
/**
* Ensure sane options are used etc
* true = ready
* false = not ready, but progressiong towards ready
* throw error = faulty setup
*
* @param {*} call
*/
async Probe(call) {
const driver = this;
const pool = _.get(driver.options, "objectivefs.pool");
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
if (driver.ctx.args.csiMode.includes("controller")) {
if (!pool) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`objectivefs.pool not configured`
);
}
if (!object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`env.OBJECTSTORE not configured`
);
}
return { ready: { value: true } };
} else {
return { ready: { value: true } };
}
}
/**
* Create an objectivefs filesystem as a new volume
*
* @param {*} call
*/
async CreateVolume(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
const parameters = call.request.parameters;
if (!pool) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`objectivefs.pool not configured`
);
}
if (!object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`env.OBJECTSTORE not configured`
);
}
const context_env = {};
for (const key in parameters) {
if (key.startsWith("env.")) {
context_env[key] = parameters[key];
}
}
context_env["env.OBJECTSTORE"] = object_store;
// filesystem names are always lower-cased by ofs
let volume_id = await driver.getVolumeIdFromCall(call);
let volume_content_source = call.request.volume_content_source;
volume_id = volume_id.toLowerCase();
const filesystem = `${pool}/${volume_id}`;
if (volume_id.length >= MAX_VOLUME_NAME_LENGTH) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`derived volume_id ${volume_id} is too long for objectivefs`
);
}
if (
call.request.volume_capabilities &&
call.request.volume_capabilities.length > 0
) {
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
}
} else {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
"missing volume_capabilities"
);
}
if (
!call.request.capacity_range ||
Object.keys(call.request.capacity_range).length === 0
) {
call.request.capacity_range = {
required_bytes: 1073741824, // meaningless
};
}
if (
call.request.capacity_range.required_bytes > 0 &&
call.request.capacity_range.limit_bytes > 0 &&
call.request.capacity_range.required_bytes >
call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required_bytes is greather than limit_bytes`
);
}
let capacity_bytes =
call.request.capacity_range.required_bytes ||
call.request.capacity_range.limit_bytes;
if (!capacity_bytes) {
//should never happen, value must be set
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume capacity is required (either required_bytes or limit_bytes)`
);
}
// ensure *actual* capacity is not greater than limit
if (
call.request.capacity_range.limit_bytes &&
call.request.capacity_range.limit_bytes > 0 &&
capacity_bytes > call.request.capacity_range.limit_bytes
) {
throw new GrpcError(
grpc.status.OUT_OF_RANGE,
`required volume capacity is greater than limit`
);
}
if (volume_content_source) {
//should never happen, cannot clone with this driver
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`cloning is not enabled`
);
}
await ofsClient.create({}, filesystem, ["-f"]);
let volume_context = {
provisioner_driver: driver.options.driver,
node_attach_driver: "objectivefs",
filesystem,
...context_env,
};
if (driver.options.instance_id) {
volume_context["provisioner_driver_instance_id"] =
driver.options.instance_id;
}
const res = {
volume: {
volume_id,
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
capacity_bytes: 0,
content_source: volume_content_source,
volume_context,
},
};
return res;
}
/**
* Delete a volume
*
* Deleting a volume consists of the following steps:
* 1. delete directory
*
* @param {*} call
*/
async DeleteVolume(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
let volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(
grpc.status.INVALID_ARGUMENT,
`volume_id is required`
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
volume_id = volume_id.toLowerCase();
const filesystem = `${pool}/${volume_id}`;
await ofsClient.destroy({}, filesystem, []);
return {};
}
  /**
   * Expand/resize a volume.
   *
   * Not supported by this driver; always reports UNIMPLEMENTED to the CO.
   *
   * @param {*} call
   * @throws {GrpcError} always, with status UNIMPLEMENTED
   */
  async ControllerExpandVolume(call) {
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
  /**
   * Report available capacity.
   *
   * Not supported by this driver; always reports UNIMPLEMENTED to the CO.
   *
   * TODO: consider volume_capabilities?
   *
   * @param {*} call
   * @throws {GrpcError} always, with status UNIMPLEMENTED
   */
  async GetCapacity(call) {
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
/**
*
* TODO: check capability to ensure not asking about block volumes
*
* @param {*} call
*/
async ListVolumes(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
let entries = [];
let entries_length = 0;
let next_token;
let uuid;
let response;
const max_entries = call.request.max_entries;
const starting_token = call.request.starting_token;
// get data from cache and return immediately
if (starting_token) {
let parts = starting_token.split(":");
uuid = parts[0];
let start_position = parseInt(parts[1]);
let end_position;
if (max_entries > 0) {
end_position = start_position + max_entries;
}
entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`);
if (entries) {
entries_length = entries.length;
entries = entries.slice(start_position, end_position);
if (max_entries > 0 && end_position > entries_length) {
next_token = `${uuid}:${end_position}`;
} else {
next_token = null;
}
const data = {
entries: entries,
next_token: next_token,
};
return data;
} else {
throw new GrpcError(
grpc.status.ABORTED,
`invalid starting_token: ${starting_token}`
);
}
}
entries = [];
const list_entries = await ofsClient.list({});
for (const entry of list_entries) {
if (entry.KIND != "ofs") {
continue;
}
let volume = await driver.populateCsiVolumeFromData(entry);
if (volume) {
let status = await driver.getVolumeStatus(entry);
entries.push({
volume,
status,
});
}
}
if (max_entries && entries.length > max_entries) {
uuid = uuidv4();
this.ctx.cache.set(`ListVolumes:result:${uuid}`, entries);
next_token = `${uuid}:${max_entries}`;
entries = entries.slice(0, max_entries);
}
const data = {
entries: entries,
next_token: next_token,
};
return data;
}
  /**
   * List snapshots.
   *
   * Not supported by this driver; always reports UNIMPLEMENTED to the CO.
   *
   * @param {*} call
   * @throws {GrpcError} always, with status UNIMPLEMENTED
   */
  async ListSnapshots(call) {
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
  /**
   * Create a snapshot.
   *
   * Not supported by this driver; always reports UNIMPLEMENTED to the CO.
   *
   * @param {*} call
   * @throws {GrpcError} always, with status UNIMPLEMENTED
   */
  async CreateSnapshot(call) {
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
  /**
   * Delete a snapshot.
   *
   * In addition, if clones have been created from a snapshot, then they must
   * be destroyed before the snapshot can be destroyed.
   *
   * Not supported by this driver; always reports UNIMPLEMENTED to the CO.
   *
   * @param {*} call
   * @throws {GrpcError} always, with status UNIMPLEMENTED
   */
  async DeleteSnapshot(call) {
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
/**
*
* @param {*} call
*/
async ValidateVolumeCapabilities(call) {
const driver = this;
const ofsClient = await driver.getObjectiveFSClient();
const pool = _.get(driver.options, "objectivefs.pool");
const volume_id = call.request.volume_id;
if (!volume_id) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing volume_id`);
}
const filesystem = `${pool}/${volume_id}`;
const entries = await ofsClient.list({}, filesystem);
const exists = entries.some((entry) => {
return entry.NAME.endsWith(filesystem) && entry.KIND == "ofs";
});
if (!exists) {
throw new GrpcError(
grpc.status.NOT_FOUND,
`invalid volume_id: ${volume_id}`
);
}
const capabilities = call.request.volume_capabilities;
if (!capabilities || capabilities.length === 0) {
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing capabilities`);
}
const result = this.assertCapabilities(call.request.volume_capabilities);
if (result.valid !== true) {
return { message: result.message };
}
return {
confirmed: {
volume_context: call.request.volume_context,
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
parameters: call.request.parameters,
},
};
}
}
module.exports.ControllerObjectiveFSDriver = ControllerObjectiveFSDriver;

View File

@ -691,6 +691,17 @@ class ControllerSynologyDriver extends CsiBaseDriver {
);
}
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
let response;
switch (driver.getDriverShareType()) {

View File

@ -219,6 +219,22 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
basename = this.options.iscsi.shareStrategyTargetCli.basename;
let setAttributesText = "";
let setAuthText = "";
let setBlockAttributesText = "";
if (this.options.iscsi.shareStrategyTargetCli.block) {
if (this.options.iscsi.shareStrategyTargetCli.block.attributes) {
for (const attributeName in this.options.iscsi
.shareStrategyTargetCli.block.attributes) {
const attributeValue =
this.options.iscsi.shareStrategyTargetCli.block.attributes[
attributeName
];
setBlockAttributesText += "\n";
setBlockAttributesText += `set attribute ${attributeName}=${attributeValue}`;
}
}
}
if (this.options.iscsi.shareStrategyTargetCli.tpg) {
if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) {
for (const attributeName in this.options.iscsi
@ -263,6 +279,8 @@ ${setAuthText}
# create extent
cd /backstores/block
create ${assetName} /dev/${extentDiskName}
cd /backstores/block/${assetName}
${setBlockAttributesText}
# add extent to target/tpg
cd /iscsi/${basename}:${assetName}/tpg1/luns

View File

@ -617,9 +617,9 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
);
}
return { ready: { value: true } };
return super.Probe(...arguments);
} else {
return { ready: { value: true } };
return super.Probe(...arguments);
}
}
@ -1190,11 +1190,30 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
// this should be already set, but when coming from a volume source
// it may not match that of the source
// TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured
properties.volsize = capacity_bytes;
//dedup
//compression
// dedup
// on, off, verify
// zfs set dedup=on tank/home
// restore default must use the below
// zfs inherit [-rS] property filesystem|volume|snapshot…
if (
(typeof this.options.zfs.zvolDedup === "string" ||
this.options.zfs.zvolDedup instanceof String) &&
this.options.zfs.zvolDedup.length > 0
) {
properties.dedup = this.options.zfs.zvolDedup;
}
// compression
// lz4, gzip-9, etc
if (
(typeof this.options.zfs.zvolCompression === "string" ||
this.options.zfs.zvolCompression instanceof String) &&
this.options.zfs.zvolCompression > 0
) {
properties.compression = this.options.zfs.zvolCompression;
}
if (setProps) {
await zb.zfs.set(datasetName, properties);
@ -1297,6 +1316,17 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
driver.ctx.logger.debug("dataset properties: %j", properties);
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
// remove share resources
await this.deleteShare(call, datasetName);

View File

@ -12,6 +12,7 @@ const {
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
const { ControllerSmbClientDriver } = require("./controller-smb-client");
const { ControllerLustreClientDriver } = require("./controller-lustre-client");
const { ControllerObjectiveFSDriver } = require("./controller-objectivefs");
const { ControllerSynologyDriver } = require("./controller-synology");
const { NodeManualDriver } = require("./node-manual");
@ -50,6 +51,8 @@ function factory(ctx, options) {
return new ControllerLocalHostpathDriver(ctx, options);
case "lustre-client":
return new ControllerLustreClientDriver(ctx, options);
case "objectivefs":
return new ControllerObjectiveFSDriver(ctx, options);
case "node-manual":
return new NodeManualDriver(ctx, options);
default:

View File

@ -183,8 +183,17 @@ class FreeNASApiDriver extends CsiBaseDriver {
const apiVersion = httpClient.getApiVersion();
const zb = await this.getZetabyte();
const truenasVersion = semver.coerce(
await httpApiClient.getSystemVersionMajorMinor()
await httpApiClient.getSystemVersionMajorMinor(),
{ loose: true }
);
if (!truenasVersion) {
throw new GrpcError(
grpc.status.UNKNOWN,
`unable to detect TrueNAS version`
);
}
const isScale = await httpApiClient.getIsScale();
let volume_context;
@ -2187,6 +2196,15 @@ class FreeNASApiDriver extends CsiBaseDriver {
);
}
try {
await httpApiClient.getSystemVersion();
} catch (err) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`TrueNAS api is unavailable: ${err.getMessage()}`
);
}
if (!(await httpApiClient.getIsScale())) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
@ -2194,9 +2212,9 @@ class FreeNASApiDriver extends CsiBaseDriver {
);
}
return { ready: { value: true } };
return super.Probe(...arguments);
} else {
return { ready: { value: true } };
return super.Probe(...arguments);
}
}
@ -2908,11 +2926,30 @@ class FreeNASApiDriver extends CsiBaseDriver {
// this should be already set, but when coming from a volume source
// it may not match that of the source
// TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured
properties.volsize = capacity_bytes;
//dedup
//compression
// dedup
// on, off, verify
// zfs set dedup=on tank/home
// restore default must use the below
// zfs inherit [-rS] property filesystem|volume|snapshot…
if (
(typeof this.options.zfs.zvolDedup === "string" ||
this.options.zfs.zvolDedup instanceof String) &&
this.options.zfs.zvolDedup.length > 0
) {
properties.dedup = this.options.zfs.zvolDedup;
}
// compression
// lz4, gzip-9, etc
if (
(typeof this.options.zfs.zvolCompression === "string" ||
this.options.zfs.zvolCompression instanceof String) &&
this.options.zfs.zvolCompression > 0
) {
properties.compression = this.options.zfs.zvolCompression;
}
if (setProps) {
await httpApiClient.DatasetSet(datasetName, properties);
@ -3011,6 +3048,17 @@ class FreeNASApiDriver extends CsiBaseDriver {
driver.ctx.logger.debug("dataset properties: %j", properties);
// deleteStrategy
const delete_strategy = _.get(
driver.options,
"_private.csi.volume.deleteStrategy",
""
);
if (delete_strategy == "retain") {
return {};
}
// remove share resources
await this.deleteShare(call, datasetName);

View File

@ -119,7 +119,11 @@ class Api {
return 2;
}
return 1;
if (systemVersion.v1) {
return 1;
}
return 2;
}
async getIsFreeNAS() {
@ -239,7 +243,7 @@ class Api {
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
*/
try {
response = await httpClient.get(endpoint);
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
versionResponses.v2 = response;
if (response.statusCode == 200) {
versionInfo.v2 = response.body;
@ -263,7 +267,7 @@ class Api {
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
*/
try {
response = await httpClient.get(endpoint);
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
versionResponses.v1 = response;
if (response.statusCode == 200 && IsJsonString(response.body)) {
versionInfo.v1 = response.body;

View File

@ -12,7 +12,7 @@ class Client {
// default to v1.0 for now
if (!this.options.apiVersion) {
this.options.apiVersion = 1;
this.options.apiVersion = 2;
}
}
@ -131,7 +131,11 @@ class Client {
delete options.httpAgent;
delete options.httpsAgent;
let duration = parseFloat((Math.round((_.get(response, 'duration', 0) + Number.EPSILON) * 100) / 100) / 1000).toFixed(2);
let duration = parseFloat(
Math.round((_.get(response, "duration", 0) + Number.EPSILON) * 100) /
100 /
1000
).toFixed(2);
this.logger.debug("FREENAS HTTP REQUEST DETAILS: " + stringify(options));
this.logger.debug("FREENAS HTTP REQUEST DURATION: " + duration + "s");
@ -140,19 +144,20 @@ class Client {
"FREENAS HTTP RESPONSE STATUS CODE: " + _.get(response, "statusCode", "")
);
this.logger.debug(
"FREENAS HTTP RESPONSE HEADERS: " + stringify(_.get(response, "headers", ""))
"FREENAS HTTP RESPONSE HEADERS: " +
stringify(_.get(response, "headers", ""))
);
this.logger.debug("FREENAS HTTP RESPONSE BODY: " + stringify(body));
}
async get(endpoint, data) {
async get(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
const options = client.getRequestCommonOptions();
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "GET";
options.url = this.getBaseURL() + endpoint;
options.params = data;
@ -167,14 +172,14 @@ class Client {
});
}
async post(endpoint, data) {
async post(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
const options = client.getRequestCommonOptions();
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "POST";
options.url = this.getBaseURL() + endpoint;
options.data = data;
@ -190,14 +195,14 @@ class Client {
});
}
async put(endpoint, data) {
async put(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
const options = client.getRequestCommonOptions();
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "PUT";
options.url = this.getBaseURL() + endpoint;
options.data = data;
@ -213,14 +218,14 @@ class Client {
});
}
async delete(endpoint, data) {
async delete(endpoint, data, options = {}) {
const client = this;
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
endpoint += "/";
}
return new Promise((resolve, reject) => {
const options = client.getRequestCommonOptions();
options = { ...client.getRequestCommonOptions(), ...options };
options.method = "DELETE";
options.url = this.getBaseURL() + endpoint;
options.data = data;

View File

@ -28,6 +28,34 @@ const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
const __REGISTRY_NS__ = "FreeNASSshDriver";
class FreeNASSshDriver extends ControllerZfsBaseDriver {
/**
* Ensure sane options are used etc
* true = ready
* false = not ready, but progressiong towards ready
* throw error = faulty setup
*
* @param {*} call
*/
async Probe(call) {
const driver = this;
if (driver.ctx.args.csiMode.includes("controller")) {
const httpApiClient = await driver.getTrueNASHttpApiClient();
try {
await httpApiClient.getSystemVersion();
} catch (err) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`TrueNAS api is unavailable: ${err.getMessage()}`
);
}
return super.Probe(...arguments);
} else {
return super.Probe(...arguments);
}
}
getExecClient() {
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
return new SshClient({
@ -231,8 +259,17 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
const apiVersion = httpClient.getApiVersion();
const zb = await this.getZetabyte();
const truenasVersion = semver.coerce(
await httpApiClient.getSystemVersionMajorMinor()
await httpApiClient.getSystemVersionMajorMinor(),
{ loose: true }
);
if (!truenasVersion) {
throw new GrpcError(
grpc.status.UNKNOWN,
`unable to detect TrueNAS version`
);
}
const isScale = await httpApiClient.getIsScale();
let volume_context;
@ -1996,7 +2033,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
this.ctx.logger.debug("zfs props data: %j", properties);
let iscsiName =
properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
// name correlates to the extent NOT the target
let kName = iscsiName.replaceAll(".", "_");
@ -2012,11 +2049,22 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
*
* midclt resync_lun_size_for_zvol tank/foo/bar
* works on SCALE only ^
*
*/
command = execClient.buildCommand("sh", [
"-c",
`echo 1 > /sys/kernel/scst_tgt/devices/${kName}/resync_size`,
]);
if (process.env.DEMOCRATIC_CSI_IS_CONTAINER == "true") {
// use the built-in wrapper script that works with sudo
command = execClient.buildCommand("simple-file-writer", [
"1",
`/sys/kernel/scst_tgt/devices/${kName}/resync_size`,
]);
} else {
// TODO: syntax fails with sudo
command = execClient.buildCommand("sh", [
"-c",
`echo 1 > /sys/kernel/scst_tgt/devices/${kName}/resync_size`,
]);
}
reload = true;
} else {
switch (apiVersion) {
@ -2086,7 +2134,11 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
return 2;
}
return 1;
if (systemVersion.v1) {
return 1;
}
return 2;
}
async getIsFreeNAS() {
@ -2211,7 +2263,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
*/
try {
response = await httpClient.get(endpoint);
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
versionResponses.v2 = response;
if (response.statusCode == 200) {
versionInfo.v2 = response.body;
@ -2235,7 +2287,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
*/
try {
response = await httpClient.get(endpoint);
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
versionResponses.v1 = response;
if (response.statusCode == 200 && IsJsonString(response.body)) {
versionInfo.v1 = response.body;

View File

@ -7,6 +7,7 @@ const k8s = require("@kubernetes/client-node");
const { GrpcError, grpc } = require("../utils/grpc");
const Handlebars = require("handlebars");
const { Mount } = require("../utils/mount");
const { ObjectiveFS } = require("../utils/objectivefs");
const { OneClient } = require("../utils/oneclient");
const { Filesystem } = require("../utils/filesystem");
const { ISCSI } = require("../utils/iscsi");
@ -181,6 +182,18 @@ class CsiBaseDriver {
});
}
getDefaultObjectiveFSInstance() {
const driver = this;
return registry.get(
`${__REGISTRY_NS__}:default_objectivefs_instance`,
() => {
return new ObjectiveFS({
pool: _.get(driver.options, "objectivefs.pool"),
});
}
);
}
/**
*
* @returns CsiProxyClient
@ -456,6 +469,9 @@ class CsiBaseDriver {
/**
* technically zfs allows `:` and `.` in addition to `_` and `-`
* TODO: make this more specific to each driver
* in particular Nomad per-alloc feature uses names with <name>-[<index>] syntax so square brackets are present
* TODO: allow for replacing chars vs absolute failure?
*/
let invalid_chars;
invalid_chars = volume_id.match(/[^a-z0-9_\-]/gi);
@ -728,6 +744,7 @@ class CsiBaseDriver {
}
switch (node_attach_driver) {
case "objectivefs":
case "oneclient":
// move along
break;
@ -801,10 +818,11 @@ class CsiBaseDriver {
if (!has_guest) {
mount_flags.push("guest");
}
}
if (volume_mount_group) {
mount_flags.push(`gid=${volume_mount_group}`);
}
// handle node service VOLUME_MOUNT_GROUP
if (volume_mount_group) {
mount_flags.push(`gid=${volume_mount_group}`);
}
break;
case "iscsi":
@ -897,12 +915,15 @@ class CsiBaseDriver {
);
}
const sessionParsedPortal = iscsi.parsePortal(session.portal);
// rescan in scenarios when login previously occurred but volumes never appeared
await iscsi.iscsiadm.rescanSession(session);
// find device name
device = iscsi.devicePathByPortalIQNLUN(
iscsiConnection.portal,
device = await iscsi.devicePathByPortalIQNLUN(
//iscsiConnection.portal,
`${sessionParsedPortal.host}:${sessionParsedPortal.port}`,
iscsiConnection.iqn,
iscsiConnection.lun
);
@ -1246,6 +1267,79 @@ class CsiBaseDriver {
return {};
}
break;
case "objectivefs":
let objectivefs = driver.getDefaultObjectiveFSInstance();
let ofs_filesystem = volume_context.filesystem;
let env = {};
for (const key in normalizedSecrets) {
if (key.startsWith("env.")) {
env[key.substr("env.".length)] = normalizedSecrets[key];
}
}
for (const key in volume_context) {
if (key.startsWith("env.")) {
env[key.substr("env.".length)] = volume_context[key];
}
}
if (!ofs_filesystem) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`missing ofs volume filesystem`
);
}
let ofs_object_store = env["OBJECTSTORE"];
if (!ofs_object_store) {
ofs_object_store = await objectivefs.getObjectStoreFromFilesystem(
ofs_filesystem
);
if (ofs_object_store) {
env["OBJECTSTORE"] = ofs_object_store;
}
}
if (!ofs_object_store) {
throw new GrpcError(
grpc.status.FAILED_PRECONDITION,
`missing required ofs volume env.OBJECTSTORE`
);
}
// normalize fs to not include objectstore
ofs_filesystem = await objectivefs.stripObjectStoreFromFilesystem(
ofs_filesystem
);
device = `${ofs_object_store}${ofs_filesystem}`;
result = await mount.deviceIsMountedAtPath(
device,
staging_target_path
);
if (result) {
return {};
}
result = await objectivefs.mount(
env,
ofs_filesystem,
staging_target_path,
mount_flags
);
if (result) {
return {};
}
throw new GrpcError(
grpc.status.UNKNOWN,
`failed to mount objectivefs: ${device}`
);
break;
case "oneclient":
let oneclient = driver.getDefaultOneClientInstance();
@ -2932,6 +3026,7 @@ class CsiBaseDriver {
case "nfs":
case "smb":
case "lustre":
case "objectivefs":
case "oneclient":
case "hostpath":
case "iscsi":

View File

@ -121,6 +121,10 @@ class NodeManualDriver extends CsiBaseDriver {
driverResourceType = "filesystem";
fs_types = ["lustre"];
break;
case "objectivefs":
driverResourceType = "filesystem";
fs_types = ["objectivefs", "fuse.objectivefs"];
break;
case "oneclient":
driverResourceType = "filesystem";
fs_types = ["oneclient", "fuse.oneclient"];

View File

@ -181,6 +181,20 @@ function stringify(value) {
return JSON.stringify(value, getCircularReplacer());
}
function before_string(target, search) {
if (!target.includes(search)) {
return "";
}
return target.substring(0, target.indexOf(search));
}
function after_string(target, search) {
if (!target.includes(search)) {
return "";
}
return target.substring(target.indexOf(search) + search.length);
}
function default_supported_block_filesystems() {
return ["btrfs", "exfat", "ext3", "ext4", "ext4dev", "ntfs", "vfat", "xfs"];
}
@ -266,6 +280,8 @@ module.exports.crc8 = crc8;
module.exports.lockKeysFromRequest = lockKeysFromRequest;
module.exports.getLargestNumber = getLargestNumber;
module.exports.stringify = stringify;
module.exports.before_string = before_string;
module.exports.after_string = after_string;
module.exports.stripWindowsDriveLetter = stripWindowsDriveLetter;
module.exports.hasWindowsDriveLetter = hasWindowsDriveLetter;
module.exports.axios_request = axios_request;

View File

@ -1,5 +1,6 @@
const cp = require("child_process");
const { sleep } = require("./general");
const { hostname_lookup, sleep } = require("./general");
const net = require("net");
function getIscsiValue(value) {
if (value == "<empty>") return null;
@ -179,12 +180,34 @@ class ISCSI {
const sessions = await iscsi.iscsiadm.getSessions();
let parsedPortal = iscsi.parsePortal(portal);
let parsedPortalHostIP = "";
if (parsedPortal.host) {
// if host is not an ip address
if (net.isIP(parsedPortal.host) == 0) {
// ipv6 response is without []
parsedPortalHostIP =
(await hostname_lookup(parsedPortal.host)) || "";
}
}
// set invalid hostname/ip string to ensure empty values do not errantly pass
if (!parsedPortalHostIP) {
parsedPortalHostIP = "--------------------------------------";
}
let session = false;
sessions.every((i_session) => {
// [2a10:4741:36:28:e61d:2dff:fe90:80fe]:3260
// i_session.portal includes [] for ipv6
if (
`${i_session.iqn}` == tgtIQN &&
(portal == i_session.portal ||
`[${parsedPortal.host}]:${parsedPortal.port}` == i_session.portal)
`${parsedPortal.host}:${parsedPortal.port}` == i_session.portal ||
`${parsedPortalHostIP}:${parsedPortal.port}` ==
i_session.portal ||
`[${parsedPortal.host}]:${parsedPortal.port}` ==
i_session.portal ||
`[${parsedPortalHostIP}]:${parsedPortal.port}` ==
i_session.portal)
) {
session = i_session;
return false;
@ -560,11 +583,12 @@ class ISCSI {
};
}
devicePathByPortalIQNLUN(portal, iqn, lun) {
async devicePathByPortalIQNLUN(portal, iqn, lun, options = {}) {
const parsedPortal = this.parsePortal(portal);
const portalHost = parsedPortal.host
.replaceAll("[", "")
.replaceAll("]", "");
let portalHost = parsedPortal.host.replaceAll("[", "").replaceAll("]", "");
if (options.hostname_lookup && net.isIP(portalHost) == 0) {
portalHost = (await hostname_lookup(portalHost)) || portalHost;
}
return `/dev/disk/by-path/ip-${portalHost}:${parsedPortal.port}-iscsi-${iqn}-lun-${lun}`;
}

349
src/utils/kopia.js Normal file
View File

@ -0,0 +1,349 @@
const _ = require("lodash");
const cp = require("child_process");
const uuidv4 = require("uuid").v4;
const DEFAULT_TIMEOUT = process.env.KOPIA_DEFAULT_TIMEOUT || 90000;
/**
* https://kopia.io/
*/
class Kopia {
  /**
   * Build a kopia CLI wrapper.
   *
   * Mutates the passed-in options object: fills in default binary paths,
   * forces per-instance KOPIA_* env vars (config, cache, log locations keyed
   * by a fresh uuid so concurrent instances never share state), and defaults
   * the executor/logger/global_flags.
   *
   * @param {object} options paths/env/executor/logger/global_flags/sudo
   */
  constructor(options = {}) {
    const kopia = this;
    kopia.options = options;
    // NOTE(review): "intance" is a typo, but the property name is referenced
    // throughout this class; renaming would be a separate change.
    kopia.client_intance_uuid = uuidv4();

    // default binary paths (only applied when the caller did not supply them)
    options.paths = options.paths || {};
    if (!options.paths.kopia) {
      options.paths.kopia = "kopia";
    }
    if (!options.paths.sudo) {
      options.paths.sudo = "/usr/bin/sudo";
    }
    if (!options.paths.chroot) {
      options.paths.chroot = "/usr/sbin/chroot";
    }

    if (!options.env) {
      options.env = {};
    }

    // per-instance kopia state dirs/files, namespaced by the instance uuid
    options.env[
      "KOPIA_CONFIG_PATH"
    ] = `/tmp/kopia/${kopia.client_intance_uuid}/repository.config`;
    options.env["KOPIA_CHECK_FOR_UPDATES"] = "false";
    options.env[
      "KOPIA_CACHE_DIRECTORY"
    ] = `/tmp/kopia/${kopia.client_intance_uuid}/cache`;
    options.env[
      "KOPIA_LOG_DIR"
    ] = `/tmp/kopia/${kopia.client_intance_uuid}/log`;

    // default process spawner; injectable for testing
    if (!options.executor) {
      options.executor = {
        spawn: cp.spawn,
      };
    }

    if (!options.logger) {
      options.logger = console;
    }

    options.logger.info(
      `kopia client instantiated with client_instance_uuid: ${kopia.client_intance_uuid}`
    );

    if (!options.global_flags) {
      options.global_flags = [];
    }
  }
/**
* kopia repository connect
*
* https://kopia.io/docs/reference/command-line/common/repository-connect-from-config/
*
* --override-hostname
* --override-username
*
* @param {*} options
*/
async repositoryConnect(options = []) {
const kopia = this;
let args = ["repository", "connect"];
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
try {
await kopia.exec(kopia.options.paths.kopia, args);
return;
} catch (err) {
throw err;
}
}
/**
* kopia repository status
*
* @param {*} options
*/
async repositoryStatus(options = []) {
const kopia = this;
let args = ["repository", "status", "--json"];
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args);
return result;
} catch (err) {
throw err;
}
}
/**
* kopia snapshot list
*
* @param {*} options
*/
async snapshotList(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "list", "--json"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-list",
});
return result.parsed;
} catch (err) {
throw err;
}
}
/**
* kopia snapshot list
*
* @param {*} snapshot_id
*/
async snapshotGet(snapshot_id) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "list", "--json", "--all"]);
args = args.concat(kopia.options.global_flags);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-list",
});
return result.parsed.find((item) => {
return item.id == snapshot_id;
});
} catch (err) {
throw err;
}
}
/**
* kopia snapshot create
*
* @param {*} options
*/
async snapshotCreate(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "create", "--json"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-create",
});
return result.parsed;
} catch (err) {
throw err;
}
}
/**
* kopia snapshot delete <id>
*
* @param {*} options
*/
async snapshotDelete(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "delete", "--delete"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-delete",
});
return result;
} catch (err) {
if (
err.code == 1 &&
(err.stderr.includes("no snapshots matched") ||
err.stderr.includes("invalid content hash"))
) {
return;
}
throw err;
}
}
/**
* kopia snapshot restore <snapshot_id[/sub/path]> /path/to/restore/to
*
* @param {*} options
*/
async snapshotRestore(options = []) {
const kopia = this;
let args = [];
args = args.concat(["snapshot", "restore"]);
args = args.concat(kopia.options.global_flags);
args = args.concat(options);
let result;
try {
result = await kopia.exec(kopia.options.paths.kopia, args, {
operation: "snapshot-restore",
});
return result;
} catch (err) {
if (
err.code == 1 &&
(err.stderr.includes("no snapshots matched") ||
err.stderr.includes("invalid content hash"))
) {
return;
}
throw err;
}
}
/**
 * Spawn a kopia process and capture its output.
 *
 * - applies DEFAULT_TIMEOUT unless the caller set options.timeout
 * - optionally wraps the command with sudo (when this.options.sudo is truthy)
 * - merges env: process.env <- this.options.env <- options.env
 * - redacts the value following a `--token` argument before logging
 * - rejects with { code, stdout, stderr, timeout } on non-zero exit or timeout
 *
 * @param {string} command binary to run
 * @param {string[]} args cli arguments
 * @param {object} options spawn options plus { timeout, stdin, operation }
 * @returns {Promise<{code:number, stdout:string, stderr:string, parsed?:*, timeout:boolean}>}
 */
exec(command, args, options = {}) {
  // default timeout unless the caller explicitly set one (including 0)
  if (!options.hasOwnProperty("timeout")) {
    options.timeout = DEFAULT_TIMEOUT;
  }
  const kopia = this;
  args = args || [];
  // re-invoke through sudo: original command becomes the first argument
  if (kopia.options.sudo) {
    args.unshift(command);
    command = kopia.options.paths.sudo;
  }
  // later spreads win: per-call env overrides instance env overrides process env
  options.env = {
    ...{},
    ...process.env,
    ...kopia.options.env,
    ...options.env,
  };
  // redact the secret that follows `--token` in the logged command line
  let tokenIndex = args.findIndex((value) => {
    return value.trim() == "--token";
  });
  let cleansedArgs = [...args];
  if (tokenIndex >= 0) {
    cleansedArgs[tokenIndex + 1] = "redacted";
  }
  const cleansedLog = `${command} ${cleansedArgs.join(" ")}`;
  console.log("executing kopia command: %s", cleansedLog);
  return new Promise((resolve, reject) => {
    let stdin;
    // options.stdin is consumed here; it must not be passed to spawn()
    if (options.stdin) {
      stdin = options.stdin;
      delete options.stdin;
    }
    const child = kopia.options.executor.spawn(command, args, options);
    if (stdin) {
      child.stdin.write(stdin);
    }
    let stdout = "";
    let stderr = "";
    // rate-limited progress logger: leading+trailing edge, at most every 5s
    const log_progress_output = _.debounce(
      (data) => {
        const lines = data.split("\n");
        /**
         * get last line, remove spinner, etc
         */
        const line = lines
          .slice(-1)[0]
          .trim()
          .replace(/^[\/\\\-\|] /gi, "");
        kopia.options.logger.info(
          `kopia ${options.operation} progress: ${line.trim()}`
        );
      },
      250,
      { leading: true, trailing: true, maxWait: 5000 }
    );
    child.stdout.on("data", function (data) {
      data = String(data);
      stdout += data;
    });
    // kopia writes progress to stderr; only snapshot-create progress is logged
    child.stderr.on("data", function (data) {
      data = String(data);
      stderr += data;
      switch (options.operation) {
        case "snapshot-create":
          log_progress_output(data);
          break;
        default:
          break;
      }
    });
    child.on("close", function (code) {
      const result = { code, stdout, stderr, timeout: false };
      // best-effort json parse of stdout (result.parsed starts undefined,
      // so this branch always runs; kept as-is)
      if (!result.parsed) {
        try {
          result.parsed = JSON.parse(result.stdout);
        } catch (err) {}
      }
      // timeout scenario: spawn timeout kills the child and close() sees a
      // null exit code; code==null is falsy so only this reject fires
      if (code === null) {
        result.timeout = true;
        reject(result);
      }
      if (code) {
        reject(result);
      } else {
        resolve(result);
      }
    });
  });
}
}
module.exports.Kopia = Kopia;

369
src/utils/objectivefs.js Normal file
View File

@ -0,0 +1,369 @@
const cp = require("child_process");
const GeneralUtils = require("./general");
// default per-command timeout (ms); overridable via MOUNT_DEFAULT_TIMEOUT
// NOTE(review): env vars are strings — when set, this holds a string, not a
// number; confirm the spawn timeout option tolerates that
const DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
// known mount.objectivefs exit codes mapped to human-readable messages;
// used in exec() to synthesize stderr when the process emitted none
const EXIT_CODES = {
  64: "administrator can not mount filesystems",
  65: "unable to decrypt using passphrase",
  78: "missing or invalid passphrase",
};
/**
* https://objectivefs.com/
*/
class ObjectiveFS {
constructor(options = {}) {
const objectivefs = this;
objectivefs.options = options;
options.paths = options.paths || {};
if (!options.paths.objectivefs) {
options.paths.objectivefs = "mount.objectivefs";
}
if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}
if (!options.env) {
options.env = {};
}
if (!options.executor) {
options.executor = {
spawn: cp.spawn,
//spawn: cp.execFile,
};
}
}
/**
* mount.objectivefs [-o <opt>[,<opt>]..] <filesystem> <dir>
*
* @param {*} env
* @param {*} filesystem
* @param {*} target
* @param {*} options
*/
async mount(env, filesystem, target, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = [];
if (options.length > 0) {
// TODO: maybe do -o <opt> -o <opt>?
args = args.concat(["-o", options.join(",")]);
}
args = args.concat([filesystem, target]);
let result;
try {
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env, operation: "mount" }
);
return result;
} catch (err) {
throw err;
}
}
/**
* mount.objectivefs create <your filesystem name>
* mount.objectivefs create -f <bucket>/<fs>
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async create(env, filesystem, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = ["create"];
args = args.concat(options);
args = args.concat([filesystem]);
let result;
try {
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env }
);
return result;
} catch (err) {
if (err.code == 1 && err.stderr.includes("filesystem already exists")) {
return;
}
throw err;
}
}
/**
* echo 'y' | mount.objectivefs destroy <bucket>/<fs>
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async destroy(env, filesystem, options = []) {
const objectivefs = this;
if (!env) {
env = {};
}
filesystem = await objectivefs.stripObjectStoreFromFilesystem(filesystem);
/**
* delete safety checks for filesystem
*
* while it is possible to delete a fs without a pool we
* should never be doing that in democratic-csi
*/
let fs_parts = filesystem.split("/");
if (fs_parts.length != 2) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
if (!fs_parts[0]) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
let pool = objectivefs.options.pool;
pool = await objectivefs.stripObjectStoreFromFilesystem(pool);
if (!pool) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
if (fs_parts[0].trim() != pool.trim()) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
if (!fs_parts[1]) {
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
}
let args = ["destroy"];
args = args.concat(options);
args = args.concat([filesystem]);
let result;
try {
result = await objectivefs.exec(
"/bin/bash",
[
"-c",
`echo y | ${objectivefs.options.paths.objectivefs} ${args.join(" ")}`,
],
{ env }
);
return result;
} catch (err) {
if (
err.code == 68 &&
err.stdout.includes("does not look like an ObjectiveFS filesystem")
) {
return;
}
throw err;
}
}
parseListOutput(data) {
const lines = data.split("\n");
let headers = [];
let entries = [];
lines.forEach((line, i) => {
if (line.length < 1) {
return;
}
const parts = line.split("\t");
if (i == 0) {
headers = parts.map((header) => {
return header.trim();
});
return;
}
let entry = {};
headers.forEach((name, index) => {
entry[name.trim()] = parts[index].trim();
});
entries.push(entry);
});
return entries;
}
/**
* mount.objectivefs list [-asvz] [<filesystem>[@<time>]]
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async list(env, filesystem = null, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = ["list"];
args = args.concat(options);
if (filesystem) {
args = args.concat([filesystem]);
}
let result;
try {
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env }
);
return objectivefs.parseListOutput(result.stdout);
} catch (err) {
throw err;
}
}
/**
* mount.objectivefs snapshot <filesystem>
*
* NOTE: fs must be mount on node to function
*
* @param {*} env
* @param {*} filesystem
* @param {*} options
*/
async snapshot(env, filesystem = null, options = []) {
if (!env) {
env = {};
}
const objectivefs = this;
let args = ["list"];
args = args.concat(options);
if (filesystem) {
args = args.concat([filesystem]);
}
let result;
try {
// NOTE: Successfully created snapshot: minio://ofs/test@2024-02-13T07:56:38Z (2024-02-13T00:56:38)
result = await objectivefs.exec(
objectivefs.options.paths.objectivefs,
args,
{ env }
);
return result;
} catch (err) {
throw err;
}
}
async getObjectStoreFromFilesystem(filesystem) {
if (filesystem.includes("://")) {
return GeneralUtils.before_string("://");
}
}
async stripObjectStoreFromFilesystem(filesystem) {
if (filesystem.includes("://")) {
return GeneralUtils.after_string("://");
}
return filesystem;
}
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}
const objectivefs = this;
args = args || [];
if (objectivefs.options.sudo) {
args.unshift(command);
command = objectivefs.options.paths.sudo;
}
options.env = { ...{}, ...objectivefs.options.env, ...options.env };
// truncate admin key during mount operations
if (options.operation == "mount") {
delete options.operation;
// standard license is 24
// admin key is 8
if (
options.env.OBJECTIVEFS_LICENSE &&
options.env.OBJECTIVEFS_LICENSE.length > 24
) {
options.env.OBJECTIVEFS_LICENSE =
options.env.OBJECTIVEFS_LICENSE.substr(0, 24);
}
}
options.env.PATH = process.env.PATH;
const cleansedLog = `${command} ${args.join(" ")}`;
console.log("executing objectivefs command: %s", cleansedLog);
//console.log(options.env);
return new Promise((resolve, reject) => {
let stdin;
if (options.stdin) {
stdin = options.stdin;
delete options.stdin;
}
const child = objectivefs.options.executor.spawn(command, args, options);
if (stdin) {
child.stdin.write(stdin);
}
let stdout = "";
let stderr = "";
child.stdout.on("data", function (data) {
stdout = stdout + data;
});
child.stderr.on("data", function (data) {
stderr = stderr + data;
});
child.on("close", function (code) {
if (!stderr && EXIT_CODES[code]) {
stderr += EXIT_CODES[code];
}
const result = { code, stdout, stderr, timeout: false };
// timeout scenario
if (code === null) {
result.timeout = true;
reject(result);
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}
module.exports.ObjectiveFS = ObjectiveFS;

View File

@ -3,3 +3,7 @@ if (typeof String.prototype.replaceAll == "undefined") {
return this.replace(new RegExp(match, "g"), () => replace);
};
}
// return a uniformly-random element of the array (undefined for an empty array)
Array.prototype.random = function () {
  const index = Math.floor(Math.random() * this.length);
  return this[index];
};

494
src/utils/restic.js Normal file
View File

@ -0,0 +1,494 @@
const _ = require("lodash");
const cp = require("child_process");
const DEFAULT_TIMEOUT = process.env.RESTIC_DEFAULT_TIMEOUT || 90000;
/**
* https://restic.net/
*/
class Restic {
constructor(options = {}) {
const restic = this;
restic.options = options;
options.paths = options.paths || {};
if (!options.paths.restic) {
options.paths.restic = "restic";
}
if (!options.paths.sudo) {
options.paths.sudo = "/usr/bin/sudo";
}
if (!options.paths.chroot) {
options.paths.chroot = "/usr/sbin/chroot";
}
if (!options.env) {
options.env = {};
}
if (!options.executor) {
options.executor = {
spawn: cp.spawn,
};
}
if (!options.logger) {
options.logger = console;
}
if (!options.global_flags) {
options.global_flags = [];
}
}
/**
* restic init
*
* @param {*} options
*/
async init(options = []) {
const restic = this;
let args = ["init", "--json"];
args = args.concat(restic.options.global_flags);
args = args.concat(options);
try {
await restic.exec(restic.options.paths.restic, args);
return;
} catch (err) {
if (err.code == 1 && err.stderr.includes("already")) {
return;
}
throw err;
}
}
/**
* restic unlock
*
* @param {*} options
*/
async unlock(options = []) {
const restic = this;
let args = ["unlock", "--json"];
args = args.concat(restic.options.global_flags);
args = args.concat(options);
try {
await restic.exec(restic.options.paths.restic, args);
return;
} catch (err) {
throw err;
}
}
/**
* restic backup
*
* @param {*} path
* @param {*} options
*/
async backup(path, options = []) {
const restic = this;
let args = [];
args = args.concat(["backup", "--json"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
args = args.concat([path]);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "backup",
timeout: 0,
});
return result;
} catch (err) {
throw err;
}
}
/**
* restic tag
*
* @param {*} options
*/
async tag(options = []) {
const restic = this;
let args = [];
args = args.concat(["tag", "--json"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "tag",
});
return result;
} catch (err) {
throw err;
}
}
/**
* restic snapshots
*
* @param {*} options
*/
async snapshots(options = []) {
const restic = this;
let args = [];
args = args.concat(["snapshots", "--json", "--no-lock"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
restic.parseTagsFromArgs(args);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "snapshots",
});
let snapshots = [];
result.parsed.forEach((item) => {
if (item.id) {
snapshots.push(item);
}
if (item.snapshots) {
snapshots.push(...item.snapshots);
}
});
return snapshots;
} catch (err) {
throw err;
}
}
/**
* restic snapshots
*
* @param {*} options
*/
async snapshot_exists(snapshot_id) {
const restic = this;
const snapshots = await restic.snapshots([snapshot_id]);
return snapshots.length > 0;
}
/**
* restic forget
*
* @param {*} options
*/
async forget(options = []) {
const restic = this;
let args = [];
args = args.concat(["forget", "--json"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "forget",
});
return result.parsed;
} catch (err) {
if (err.code == 1 && err.stderr.includes("no such file or directory")) {
return [];
}
throw err;
}
}
/**
* restic stats
*
* @param {*} options
*/
async stats(options = []) {
const restic = this;
let args = [];
args = args.concat(["stats", "--json", "--no-lock"]);
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "stats",
timeout: 0, // can take a very long time to gather up details
});
return result.parsed;
} catch (err) {
throw err;
}
}
/**
* restic restore
*
* note that restore does not do any delete operations (ie: not like rsync --delete)
*
* @param {*} options
*/
async restore(options = []) {
const restic = this;
let args = ["restore", "--json", "--no-lock"];
args = args.concat(restic.options.global_flags);
args = args.concat(options);
let result;
try {
result = await restic.exec(restic.options.paths.restic, args, {
operation: "restore",
timeout: 0,
});
return result.parsed;
} catch (err) {
if (err.code == 1 && err.stderr.includes("Fatal:")) {
const lines = err.stderr.split("\n").filter((item) => {
return Boolean(String(item).trim());
});
const last_line = lines[lines.length - 1];
const ingored_count = (err.stderr.match(/ignoring error/g) || [])
.length;
restic.options.logger.info(
`restic ignored error count: ${ingored_count}`
);
restic.options.logger.info(`restic stderr last line: ${last_line}`);
// if ignored count matches total count move on
// "Fatal: There were 2484 errors"
if (last_line.includes(String(ingored_count))) {
return err;
}
}
throw err;
}
}
trimResultData(result, options = {}) {
const trim_output_limt = options.max_entries || 50;
// trim stdout/stderr/parsed lines to X number
if (result.parsed && Array.isArray(result.parsed)) {
result.parsed = result.parsed.slice(trim_output_limt * -1);
}
result.stderr = result.stderr
.split("\n")
.slice(trim_output_limt * -1)
.join("\n");
result.stdout = result.stdout
.split("\n")
.slice(trim_output_limt * -1)
.join("\n");
return result;
}
parseTagsFromArgs(args) {
let tag_value_index;
let tags = args.filter((value, index) => {
if (String(value) == "--tag") {
tag_value_index = index + 1;
}
return tag_value_index == index;
});
tags = tags
.map((value) => {
if (value.includes(",")) {
return value.split(",");
}
return [value];
})
.flat();
return tags;
}
exec(command, args, options = {}) {
if (!options.hasOwnProperty("timeout")) {
options.timeout = DEFAULT_TIMEOUT;
}
const restic = this;
args = args || [];
if (restic.options.sudo) {
args.unshift(command);
command = restic.options.paths.sudo;
}
options.env = {
...{},
...process.env,
...restic.options.env,
...options.env,
};
const cleansedLog = `${command} ${args.join(" ")}`;
console.log("executing restic command: %s", cleansedLog);
return new Promise((resolve, reject) => {
let stdin;
if (options.stdin) {
stdin = options.stdin;
delete options.stdin;
}
const child = restic.options.executor.spawn(command, args, options);
if (stdin) {
child.stdin.write(stdin);
}
let stdout = "";
let stderr = "";
let code_override;
const log_progress_output = _.debounce(
(data) => {
let snapshot_id;
let path;
switch (options.operation) {
case "backup":
snapshot_id = `unknown_creating_new_snapshot_in_progress`;
path = args[args.length - 1];
break;
case "restore":
snapshot_id = args
.find((value) => {
return String(value).includes(":");
})
.split(":")[0];
let path_index;
path = args.find((value, index) => {
if (String(value) == "--target") {
path_index = index + 1;
}
return path_index == index;
});
break;
default:
return;
}
if (data.message_type == "status") {
delete data.current_files;
restic.options.logger.info(
`restic ${options.operation} progress: snapshot_id=${snapshot_id}, path=${path}`,
data
);
}
if (data.message_type == "summary") {
restic.options.logger.info(
`restic ${options.operation} summary: snapshot_id=${snapshot_id}, path=${path}`,
data
);
}
},
250,
{ leading: true, trailing: true, maxWait: 5000 }
);
child.stdout.on("data", function (data) {
data = String(data);
stdout += data;
switch (options.operation) {
case "backup":
case "restore":
try {
let parsed = JSON.parse(data);
log_progress_output(parsed);
} catch (err) {}
break;
}
});
child.stderr.on("data", function (data) {
data = String(data);
stderr += data;
if (
["forget", "snapshots"].includes(options.operation) &&
stderr.includes("no such file or directory")
) {
// short-circut the operation vs waiting for all the retries
// https://github.com/restic/restic/pull/2515
switch (options.operation) {
case "forget":
code_override = 1;
break;
case "snapshots":
code_override = 0;
break;
}
child.kill();
}
});
child.on("close", function (code) {
const result = { code, stdout, stderr, timeout: false };
if (!result.parsed) {
try {
result.parsed = JSON.parse(result.stdout);
} catch (err) {}
}
if (!result.parsed) {
try {
const lines = result.stdout.split("\n");
const parsed = [];
lines.forEach((line) => {
if (!line) {
return;
}
parsed.push(JSON.parse(line.trim()));
});
result.parsed = parsed;
} catch (err) {}
}
/**
* normalize array responses in scenarios where not enough came through
* to add newlines
*/
if (result.parsed && options.operation == "backup") {
if (!Array.isArray(result.parsed)) {
result.parsed = [result.parsed];
}
}
if (code == null && code_override != null) {
code = code_override;
}
// timeout scenario
if (code === null) {
result.timeout = true;
reject(result);
}
if (code) {
reject(result);
} else {
resolve(result);
}
});
});
}
}
module.exports.Restic = Restic;