Compare commits
74 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
8193b689ed | |
|
|
4e402645d9 | |
|
|
31f215c55a | |
|
|
4bda571e52 | |
|
|
7648b0d015 | |
|
|
8b0b7b8b4f | |
|
|
4ec9ff23f2 | |
|
|
8c5ac0a2c7 | |
|
|
6c30c7ca50 | |
|
|
7595191a13 | |
|
|
6af367fbb6 | |
|
|
dd604f4d3a | |
|
|
2e7eed890e | |
|
|
7206429913 | |
|
|
23e6ecb1fa | |
|
|
38bee217dd | |
|
|
a6dec24a70 | |
|
|
1aae49462d | |
|
|
e95b4c6b47 | |
|
|
089462f9d4 | |
|
|
d7919e766d | |
|
|
7911bc9200 | |
|
|
c6c39975f2 | |
|
|
6570181506 | |
|
|
d73183e841 | |
|
|
6198edfa1a | |
|
|
b7a3c08087 | |
|
|
27eb354590 | |
|
|
f607c2a6d5 | |
|
|
536f954e0a | |
|
|
98c55987ca | |
|
|
104a4d5bce | |
|
|
13940fa8bf | |
|
|
a8a20b9690 | |
|
|
c6c1f1a264 | |
|
|
57f74ce665 | |
|
|
95f2ae2d2c | |
|
|
6ada2684f3 | |
|
|
28f8af3147 | |
|
|
8a1ac03b9f | |
|
|
ed32cf8db0 | |
|
|
d70b45b909 | |
|
|
784ce31922 | |
|
|
0487cbcc7e | |
|
|
c95b90b041 | |
|
|
a36b73bf23 | |
|
|
15b120c3cc | |
|
|
7a31c1ba36 | |
|
|
5a8ee64957 | |
|
|
1aafa064a4 | |
|
|
4ff51a8e0a | |
|
|
a95a6d9268 | |
|
|
1e2ca16632 | |
|
|
79f16a0cf6 | |
|
|
300cae30fd | |
|
|
4fc3c15133 | |
|
|
1129c4120a | |
|
|
b8b1188a14 | |
|
|
339e952d1c | |
|
|
f27a359f7c | |
|
|
0dc3ecccdf | |
|
|
8238e1bead | |
|
|
4e2681b8bc | |
|
|
a9e5ff07d6 | |
|
|
1856b4fa29 | |
|
|
01eed24cb7 | |
|
|
d2b9068a23 | |
|
|
764260fff7 | |
|
|
fcaa64e612 | |
|
|
a3df4bcca0 | |
|
|
d476a5721b | |
|
|
05e9d93284 | |
|
|
80abab1b9b | |
|
|
537497470d |
|
|
@ -16,6 +16,7 @@ if [[ -n "${IMAGE_TAG}" ]]; then
|
||||||
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \
|
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \
|
||||||
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
|
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
|
||||||
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \
|
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \
|
||||||
|
--build-arg OBJECTIVEFS_DOWNLOAD_ID=${OBJECTIVEFS_DOWNLOAD_ID} \
|
||||||
.
|
.
|
||||||
else
|
else
|
||||||
:
|
:
|
||||||
|
|
|
||||||
|
|
@ -15,23 +15,23 @@ jobs:
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Cancel Previous Runs
|
- name: Cancel Previous Runs
|
||||||
uses: styfle/cancel-workflow-action@0.11.0
|
uses: styfle/cancel-workflow-action@0.12.1
|
||||||
with:
|
with:
|
||||||
access_token: ${{ github.token }}
|
access_token: ${{ github.token }}
|
||||||
|
|
||||||
build-npm-linux-amd64:
|
build-npm-linux-amd64:
|
||||||
runs-on: ubuntu-20.04
|
runs-on: ubuntu-20.04
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-node@v3
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: 16
|
node-version: 20
|
||||||
- shell: bash
|
- shell: bash
|
||||||
name: npm install
|
name: npm install
|
||||||
run: |
|
run: |
|
||||||
ci/bin/build.sh
|
ci/bin/build.sh
|
||||||
- name: upload build
|
- name: upload build
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
path: node_modules-linux-amd64.tar.gz
|
path: node_modules-linux-amd64.tar.gz
|
||||||
|
|
@ -40,16 +40,16 @@ jobs:
|
||||||
build-npm-windows-amd64:
|
build-npm-windows-amd64:
|
||||||
runs-on: windows-2022
|
runs-on: windows-2022
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/setup-node@v3
|
- uses: actions/setup-node@v4
|
||||||
with:
|
with:
|
||||||
node-version: 16
|
node-version: 20
|
||||||
- shell: pwsh
|
- shell: pwsh
|
||||||
name: npm install
|
name: npm install
|
||||||
run: |
|
run: |
|
||||||
ci\bin\build.ps1
|
ci\bin\build.ps1
|
||||||
- name: upload build
|
- name: upload build
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-windows-amd64
|
name: node-modules-windows-amd64
|
||||||
path: node_modules-windows-amd64.tar.gz
|
path: node_modules-windows-amd64.tar.gz
|
||||||
|
|
@ -69,8 +69,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-synology
|
- csi-sanity-synology
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -99,8 +99,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-synology
|
- csi-sanity-synology
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -115,17 +115,18 @@ jobs:
|
||||||
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
|
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
|
||||||
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
|
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
|
||||||
|
|
||||||
csi-sanity-truenas-scale-22_12:
|
csi-sanity-truenas-scale-24_04:
|
||||||
needs:
|
needs:
|
||||||
- build-npm-linux-amd64
|
- build-npm-linux-amd64
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
max-parallel: 1
|
||||||
matrix:
|
matrix:
|
||||||
config:
|
config:
|
||||||
- truenas/scale/22.12/scale-iscsi.yaml
|
- truenas/scale/24.04/scale-iscsi.yaml
|
||||||
- truenas/scale/22.12/scale-nfs.yaml
|
- truenas/scale/24.04/scale-nfs.yaml
|
||||||
# 80 char limit
|
# 80 char limit
|
||||||
- truenas/scale/22.12/scale-smb.yaml
|
- truenas/scale/24.04/scale-smb.yaml
|
||||||
runs-on:
|
runs-on:
|
||||||
- self-hosted
|
- self-hosted
|
||||||
- Linux
|
- Linux
|
||||||
|
|
@ -133,8 +134,8 @@ jobs:
|
||||||
#- csi-sanity-truenas
|
#- csi-sanity-truenas
|
||||||
- csi-sanity-zfs-generic
|
- csi-sanity-zfs-generic
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -143,7 +144,7 @@ jobs:
|
||||||
ci/bin/run.sh
|
ci/bin/run.sh
|
||||||
env:
|
env:
|
||||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||||
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_22_12_HOST }}
|
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_24_04_HOST }}
|
||||||
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
|
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
|
||||||
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
|
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
|
||||||
|
|
||||||
|
|
@ -153,6 +154,7 @@ jobs:
|
||||||
- build-npm-linux-amd64
|
- build-npm-linux-amd64
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
max-parallel: 1
|
||||||
matrix:
|
matrix:
|
||||||
config:
|
config:
|
||||||
- truenas/core/13.0/core-iscsi.yaml
|
- truenas/core/13.0/core-iscsi.yaml
|
||||||
|
|
@ -166,8 +168,8 @@ jobs:
|
||||||
#- csi-sanity-truenas
|
#- csi-sanity-truenas
|
||||||
- csi-sanity-zfs-generic
|
- csi-sanity-zfs-generic
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -186,6 +188,7 @@ jobs:
|
||||||
- build-npm-linux-amd64
|
- build-npm-linux-amd64
|
||||||
strategy:
|
strategy:
|
||||||
fail-fast: false
|
fail-fast: false
|
||||||
|
max-parallel: 1
|
||||||
matrix:
|
matrix:
|
||||||
config:
|
config:
|
||||||
- zfs-generic/iscsi.yaml
|
- zfs-generic/iscsi.yaml
|
||||||
|
|
@ -198,8 +201,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-zfs-generic
|
- csi-sanity-zfs-generic
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -212,6 +215,45 @@ jobs:
|
||||||
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
|
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
|
||||||
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
|
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
|
||||||
|
|
||||||
|
# client drivers
|
||||||
|
csi-sanity-objectivefs:
|
||||||
|
needs:
|
||||||
|
- build-npm-linux-amd64
|
||||||
|
strategy:
|
||||||
|
fail-fast: false
|
||||||
|
matrix:
|
||||||
|
config:
|
||||||
|
- objectivefs/objectivefs.yaml
|
||||||
|
runs-on:
|
||||||
|
- self-hosted
|
||||||
|
- Linux
|
||||||
|
- X64
|
||||||
|
- csi-sanity-client
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- uses: actions/download-artifact@v4
|
||||||
|
with:
|
||||||
|
name: node-modules-linux-amd64
|
||||||
|
- name: csi-sanity
|
||||||
|
run: |
|
||||||
|
# run tests
|
||||||
|
ci/bin/run.sh
|
||||||
|
env:
|
||||||
|
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||||
|
OBJECTIVEFS_POOL: ${{ secrets.SANITY_OBJECTIVEFS_POOL }}
|
||||||
|
OBJECTIVEFS_LICENSE: ${{ secrets.SANITY_OBJECTIVEFS_LICENSE }}
|
||||||
|
OBJECTIVEFS_OBJECTSTORE: ${{ secrets.SANITY_OBJECTIVEFS_OBJECTSTORE }}
|
||||||
|
OBJECTIVEFS_ENDPOINT_PROTOCOL: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PROTOCOL }}
|
||||||
|
OBJECTIVEFS_ENDPOINT_HOST: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_HOST }}
|
||||||
|
OBJECTIVEFS_ENDPOINT_PORT: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PORT }}
|
||||||
|
OBJECTIVEFS_SECRET_KEY: ${{ secrets.SANITY_OBJECTIVEFS_SECRET_KEY }}
|
||||||
|
OBJECTIVEFS_ACCESS_KEY: ${{ secrets.SANITY_OBJECTIVEFS_ACCESS_KEY }}
|
||||||
|
OBJECTIVEFS_PASSPHRASE: ${{ secrets.SANITY_OBJECTIVEFS_PASSPHRASE }}
|
||||||
|
|
||||||
|
# these secrets need to match the above secrets for staging/etc
|
||||||
|
CSI_SANITY_SECRETS: /root/csi-secrets/objectivefs-secrets.yaml
|
||||||
|
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
|
||||||
|
|
||||||
# client drivers
|
# client drivers
|
||||||
csi-sanity-client:
|
csi-sanity-client:
|
||||||
needs:
|
needs:
|
||||||
|
|
@ -228,8 +270,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-client
|
- csi-sanity-client
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -256,8 +298,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-client
|
- csi-sanity-client
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-windows-amd64
|
name: node-modules-windows-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -286,8 +328,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-zfs-local
|
- csi-sanity-zfs-local
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-linux-amd64
|
name: node-modules-linux-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -325,8 +367,8 @@ jobs:
|
||||||
- X64
|
- X64
|
||||||
- csi-sanity-local-hostpath
|
- csi-sanity-local-hostpath
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.npmartifact }}
|
name: ${{ matrix.npmartifact }}
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -349,8 +391,8 @@ jobs:
|
||||||
- Windows
|
- Windows
|
||||||
- X64
|
- X64
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: node-modules-windows-amd64
|
name: node-modules-windows-amd64
|
||||||
- name: csi-sanity
|
- name: csi-sanity
|
||||||
|
|
@ -393,9 +435,10 @@ jobs:
|
||||||
- determine-image-tag
|
- determine-image-tag
|
||||||
- csi-sanity-synology-dsm6
|
- csi-sanity-synology-dsm6
|
||||||
- csi-sanity-synology-dsm7
|
- csi-sanity-synology-dsm7
|
||||||
- csi-sanity-truenas-scale-22_12
|
- csi-sanity-truenas-scale-24_04
|
||||||
- csi-sanity-truenas-core-13_0
|
- csi-sanity-truenas-core-13_0
|
||||||
- csi-sanity-zfs-generic
|
- csi-sanity-zfs-generic
|
||||||
|
- csi-sanity-objectivefs
|
||||||
- csi-sanity-client
|
- csi-sanity-client
|
||||||
- csi-sanity-client-windows
|
- csi-sanity-client-windows
|
||||||
- csi-sanity-zfs-local
|
- csi-sanity-zfs-local
|
||||||
|
|
@ -403,7 +446,7 @@ jobs:
|
||||||
- csi-sanity-windows-node
|
- csi-sanity-windows-node
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- name: docker build
|
- name: docker build
|
||||||
run: |
|
run: |
|
||||||
export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
|
export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
|
||||||
|
|
@ -423,6 +466,7 @@ jobs:
|
||||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||||
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
|
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
|
||||||
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
|
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
|
||||||
|
OBJECTIVEFS_DOWNLOAD_ID: ${{ secrets.OBJECTIVEFS_DOWNLOAD_ID }}
|
||||||
DOCKER_CLI_EXPERIMENTAL: enabled
|
DOCKER_CLI_EXPERIMENTAL: enabled
|
||||||
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
|
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
|
||||||
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}
|
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}
|
||||||
|
|
@ -431,9 +475,10 @@ jobs:
|
||||||
needs:
|
needs:
|
||||||
- csi-sanity-synology-dsm6
|
- csi-sanity-synology-dsm6
|
||||||
- csi-sanity-synology-dsm7
|
- csi-sanity-synology-dsm7
|
||||||
- csi-sanity-truenas-scale-22_12
|
- csi-sanity-truenas-scale-24_04
|
||||||
- csi-sanity-truenas-core-13_0
|
- csi-sanity-truenas-core-13_0
|
||||||
- csi-sanity-zfs-generic
|
- csi-sanity-zfs-generic
|
||||||
|
- csi-sanity-objectivefs
|
||||||
- csi-sanity-client
|
- csi-sanity-client
|
||||||
- csi-sanity-client-windows
|
- csi-sanity-client-windows
|
||||||
- csi-sanity-zfs-local
|
- csi-sanity-zfs-local
|
||||||
|
|
@ -453,7 +498,7 @@ jobs:
|
||||||
nano_base_tag: ltsc2022
|
nano_base_tag: ltsc2022
|
||||||
file: Dockerfile.Windows
|
file: Dockerfile.Windows
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- name: docker build
|
- name: docker build
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
|
|
@ -465,7 +510,7 @@ jobs:
|
||||||
docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
|
docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
|
||||||
docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
||||||
- name: upload image tar
|
- name: upload image tar
|
||||||
uses: actions/upload-artifact@v3
|
uses: actions/upload-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
||||||
path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
||||||
|
|
@ -480,11 +525,11 @@ jobs:
|
||||||
- self-hosted
|
- self-hosted
|
||||||
- buildah
|
- buildah
|
||||||
steps:
|
steps:
|
||||||
- uses: actions/checkout@v3
|
- uses: actions/checkout@v4
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: democratic-csi-windows-ltsc2019.tar
|
name: democratic-csi-windows-ltsc2019.tar
|
||||||
- uses: actions/download-artifact@v3
|
- uses: actions/download-artifact@v4
|
||||||
with:
|
with:
|
||||||
name: democratic-csi-windows-ltsc2022.tar
|
name: democratic-csi-windows-ltsc2022.tar
|
||||||
- name: push windows images with buildah
|
- name: push windows images with buildah
|
||||||
|
|
|
||||||
61
CHANGELOG.md
61
CHANGELOG.md
|
|
@ -1,3 +1,64 @@
|
||||||
|
# v1.9.4
|
||||||
|
|
||||||
|
Release 2024-07-06
|
||||||
|
|
||||||
|
- minor doc updates
|
||||||
|
|
||||||
|
# v1.9.3
|
||||||
|
|
||||||
|
Released 2024-06-01
|
||||||
|
|
||||||
|
- minor fixes for objectivefs and iscsi
|
||||||
|
|
||||||
|
# v1.9.2
|
||||||
|
|
||||||
|
Released 2024-05-23
|
||||||
|
|
||||||
|
- minor fixes for objectivefs and iscsi
|
||||||
|
|
||||||
|
# v1.9.1
|
||||||
|
|
||||||
|
Released 2024-05-06
|
||||||
|
|
||||||
|
- fix iscsi hostname lookup regression (#393)
|
||||||
|
- fix resize issue (#390)
|
||||||
|
- fix Probe issue (#385)
|
||||||
|
|
||||||
|
# v1.9.0
|
||||||
|
|
||||||
|
Released 2024-03-26
|
||||||
|
|
||||||
|
- new `objectivefs` driver (https://objectivefs.com) support available for x86_64 and arm64
|
||||||
|
- TrueNAS
|
||||||
|
- SCALE 24.04 support
|
||||||
|
- fix `sudo` issue during resize operations (see #295)
|
||||||
|
- fix version detection logic and default to api version 2 (see #351)
|
||||||
|
- more robust `Probe` implementation
|
||||||
|
- contaimer images
|
||||||
|
- various fixes, improvements, dep upgrades, etc
|
||||||
|
- update container images to `debian:12` (bookworm)
|
||||||
|
- bump to nodejs-lts-iron from nodejs-lts-hydrogen
|
||||||
|
- support csi v1.6.0-v1.9.0
|
||||||
|
- allow `noop` delete operations (dangerous, only use if you _really_ know what you are doing, see #289)
|
||||||
|
- properly adhere to the `zvolDedup` and `zvolCompression` settings (see #322)
|
||||||
|
- `restic` and `kopia` support as a snapshot solution for `local-hostpath` and `*-client` drivers
|
||||||
|
|
||||||
|
# v1.8.4
|
||||||
|
|
||||||
|
Released 2023-11-09
|
||||||
|
|
||||||
|
- allow templatized `volume_id` (dangerous, only use if you _really_ know what you are doing)
|
||||||
|
- fix TrueNAS SCALE iscsi resize issue
|
||||||
|
- TrueNAS SCALE 23.10 support
|
||||||
|
- minor improvements/fixes throughout
|
||||||
|
- dependency updates
|
||||||
|
|
||||||
|
# v1.8.3
|
||||||
|
|
||||||
|
Released 2023-04-05
|
||||||
|
|
||||||
|
- fix invalid `access_mode` logic (see #287)
|
||||||
|
|
||||||
# v1.8.2
|
# v1.8.2
|
||||||
|
|
||||||
Released 2023-04-02
|
Released 2023-04-02
|
||||||
|
|
|
||||||
49
Dockerfile
49
Dockerfile
|
|
@ -1,4 +1,4 @@
|
||||||
FROM debian:11-slim AS build
|
FROM debian:12-slim AS build
|
||||||
#FROM --platform=$BUILDPLATFORM debian:10-slim AS build
|
#FROM --platform=$BUILDPLATFORM debian:10-slim AS build
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
|
@ -9,14 +9,14 @@ ARG BUILDPLATFORM
|
||||||
RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM"
|
RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM"
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
|
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
|
||||||
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
||||||
|
|
||||||
ENV LANG=en_US.utf8
|
ENV LANG=en_US.utf8
|
||||||
ENV NODE_VERSION=v16.18.0
|
ENV NODE_VERSION=v20.11.1
|
||||||
ENV NODE_ENV=production
|
ENV NODE_ENV=production
|
||||||
|
|
||||||
# install build deps
|
# install build deps
|
||||||
RUN apt-get update && apt-get install -y python make cmake gcc g++
|
RUN apt-get update && apt-get install -y python3 make cmake gcc g++
|
||||||
|
|
||||||
# install node
|
# install node
|
||||||
RUN apt-get update && apt-get install -y wget xz-utils
|
RUN apt-get update && apt-get install -y wget xz-utils
|
||||||
|
|
@ -26,8 +26,8 @@ ENV PATH=/usr/local/lib/nodejs/bin:$PATH
|
||||||
|
|
||||||
# Run as a non-root user
|
# Run as a non-root user
|
||||||
RUN useradd --create-home csi \
|
RUN useradd --create-home csi \
|
||||||
&& mkdir /home/csi/app \
|
&& mkdir /home/csi/app \
|
||||||
&& chown -R csi: /home/csi
|
&& chown -R csi: /home/csi
|
||||||
WORKDIR /home/csi/app
|
WORKDIR /home/csi/app
|
||||||
USER csi
|
USER csi
|
||||||
|
|
||||||
|
|
@ -40,31 +40,33 @@ RUN rm -rf docker
|
||||||
######################
|
######################
|
||||||
# actual image
|
# actual image
|
||||||
######################
|
######################
|
||||||
FROM debian:11-slim
|
FROM debian:12-slim
|
||||||
|
|
||||||
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
|
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
|
||||||
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
|
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
|
||||||
LABEL org.opencontainers.image.licenses MIT
|
LABEL org.opencontainers.image.licenses MIT
|
||||||
|
|
||||||
ENV DEBIAN_FRONTEND=noninteractive
|
ENV DEBIAN_FRONTEND=noninteractive
|
||||||
|
ENV DEMOCRATIC_CSI_IS_CONTAINER=true
|
||||||
|
|
||||||
ARG TARGETPLATFORM
|
ARG TARGETPLATFORM
|
||||||
ARG BUILDPLATFORM
|
ARG BUILDPLATFORM
|
||||||
|
ARG OBJECTIVEFS_DOWNLOAD_ID
|
||||||
|
|
||||||
RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM"
|
RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM"
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
|
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
|
||||||
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
|
||||||
|
|
||||||
ENV LANG=en_US.utf8
|
ENV LANG=en_US.utf8
|
||||||
ENV NODE_ENV=production
|
ENV NODE_ENV=production
|
||||||
|
|
||||||
# Workaround for https://github.com/nodejs/node/issues/37219
|
# Workaround for https://github.com/nodejs/node/issues/37219
|
||||||
RUN test $(uname -m) != armv7l || ( \
|
RUN test $(uname -m) != armv7l || ( \
|
||||||
apt-get update \
|
apt-get update \
|
||||||
&& apt-get install -y libatomic1 \
|
&& apt-get install -y libatomic1 \
|
||||||
&& rm -rf /var/lib/apt/lists/* \
|
&& rm -rf /var/lib/apt/lists/* \
|
||||||
)
|
)
|
||||||
|
|
||||||
# install node
|
# install node
|
||||||
#ENV PATH=/usr/local/lib/nodejs/bin:$PATH
|
#ENV PATH=/usr/local/lib/nodejs/bin:$PATH
|
||||||
|
|
@ -75,14 +77,31 @@ COPY --from=build /usr/local/lib/nodejs/bin/node /usr/local/bin/node
|
||||||
# netbase is required by rpcbind/rpcinfo to work properly
|
# netbase is required by rpcbind/rpcinfo to work properly
|
||||||
# /etc/{services,rpc} are required
|
# /etc/{services,rpc} are required
|
||||||
RUN apt-get update && \
|
RUN apt-get update && \
|
||||||
apt-get install -y netbase socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli && \
|
apt-get install -y wget netbase zip bzip2 socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli fuse3 && \
|
||||||
rm -rf /var/lib/apt/lists/*
|
rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
ARG RCLONE_VERSION=1.66.0
|
||||||
|
ADD docker/rclone-installer.sh /usr/local/sbin
|
||||||
|
RUN chmod +x /usr/local/sbin/rclone-installer.sh && rclone-installer.sh
|
||||||
|
|
||||||
|
ARG RESTIC_VERSION=0.16.4
|
||||||
|
ADD docker/restic-installer.sh /usr/local/sbin
|
||||||
|
RUN chmod +x /usr/local/sbin/restic-installer.sh && restic-installer.sh
|
||||||
|
|
||||||
|
ARG KOPIA_VERSION=0.16.1
|
||||||
|
ADD docker/kopia-installer.sh /usr/local/sbin
|
||||||
|
RUN chmod +x /usr/local/sbin/kopia-installer.sh && kopia-installer.sh
|
||||||
|
|
||||||
# controller requirements
|
# controller requirements
|
||||||
#RUN apt-get update && \
|
#RUN apt-get update && \
|
||||||
# apt-get install -y ansible && \
|
# apt-get install -y ansible && \
|
||||||
# rm -rf /var/lib/apt/lists/*
|
# rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
|
# install objectivefs
|
||||||
|
ARG OBJECTIVEFS_VERSION=7.2
|
||||||
|
ADD docker/objectivefs-installer.sh /usr/local/sbin
|
||||||
|
RUN chmod +x /usr/local/sbin/objectivefs-installer.sh && objectivefs-installer.sh
|
||||||
|
|
||||||
# install wrappers
|
# install wrappers
|
||||||
ADD docker/iscsiadm /usr/local/sbin
|
ADD docker/iscsiadm /usr/local/sbin
|
||||||
RUN chmod +x /usr/local/sbin/iscsiadm
|
RUN chmod +x /usr/local/sbin/iscsiadm
|
||||||
|
|
@ -107,7 +126,7 @@ RUN chmod +x /usr/local/bin/oneclient
|
||||||
|
|
||||||
# Run as a non-root user
|
# Run as a non-root user
|
||||||
RUN useradd --create-home csi \
|
RUN useradd --create-home csi \
|
||||||
&& chown -R csi: /home/csi
|
&& chown -R csi: /home/csi
|
||||||
|
|
||||||
COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app
|
COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app
|
||||||
|
|
||||||
|
|
|
||||||
39
README.md
39
README.md
|
|
@ -30,6 +30,7 @@ have access to resizing, snapshots, clones, etc functionality.
|
||||||
- `zfs-local-dataset` (provision node-local volume as dataset)
|
- `zfs-local-dataset` (provision node-local volume as dataset)
|
||||||
- `zfs-local-zvol` (provision node-local volume as zvol)
|
- `zfs-local-zvol` (provision node-local volume as zvol)
|
||||||
- `synology-iscsi` experimental (manages volumes to share over iscsi)
|
- `synology-iscsi` experimental (manages volumes to share over iscsi)
|
||||||
|
- `objectivefs` (manages objectivefs volumes)
|
||||||
- `lustre-client` (crudely provisions storage using a shared lustre
|
- `lustre-client` (crudely provisions storage using a shared lustre
|
||||||
share/directory for all volumes)
|
share/directory for all volumes)
|
||||||
- `nfs-client` (crudely provisions storage using a shared nfs share/directory
|
- `nfs-client` (crudely provisions storage using a shared nfs share/directory
|
||||||
|
|
@ -63,6 +64,7 @@ Predominantly 3 things are needed:
|
||||||
from `nfs-client-provisioner` to `democratic-csi`)
|
from `nfs-client-provisioner` to `democratic-csi`)
|
||||||
- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc
|
- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc
|
||||||
(migrating between storage classes using `velero`)
|
(migrating between storage classes using `velero`)
|
||||||
|
- https://github.com/fenio/k8s-truenas (NFS/iSCSI over API with TrueNAS Scale)
|
||||||
|
|
||||||
## Node Prep
|
## Node Prep
|
||||||
|
|
||||||
|
|
@ -186,9 +188,11 @@ node:
|
||||||
|
|
||||||
and continue your democratic installation as usuall with other iscsi drivers.
|
and continue your democratic installation as usuall with other iscsi drivers.
|
||||||
|
|
||||||
#### Privilged Namespace
|
#### Privileged Namespace
|
||||||
|
|
||||||
democratic-csi requires privileged access to the nodes, so the namespace should allow for privileged pods. One way of doing it is via [namespace labels](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/).
|
democratic-csi requires privileged access to the nodes, so the namespace should allow for privileged pods. One way of doing it is via [namespace labels](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/).
|
||||||
Add the followin label to the democratic-csi installation namespace `pod-security.kubernetes.io/enforce=privileged`
|
Add the followin label to the democratic-csi installation namespace `pod-security.kubernetes.io/enforce=privileged`
|
||||||
|
|
||||||
```
|
```
|
||||||
kubectl label --overwrite namespace democratic-csi pod-security.kubernetes.io/enforce=privileged
|
kubectl label --overwrite namespace democratic-csi pod-security.kubernetes.io/enforce=privileged
|
||||||
```
|
```
|
||||||
|
|
@ -332,8 +336,9 @@ with much older versions as well.
|
||||||
The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be
|
The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be
|
||||||
used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh`
|
used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh`
|
||||||
connections and do all operations entirely with the TrueNAS api. With that in
|
connections and do all operations entirely with the TrueNAS api. With that in
|
||||||
mind, any ssh/shell/etc requirements below can be safely ignored. Also note the
|
mind, any ssh/shell/etc requirements below can be safely ignored. The minimum
|
||||||
following known issues:
|
volume size through the api is `1G` so beware that requested volumes with a
|
||||||
|
size small will be increased to `1G`. Also note the following known issues:
|
||||||
|
|
||||||
- https://jira.ixsystems.com/browse/NAS-111870
|
- https://jira.ixsystems.com/browse/NAS-111870
|
||||||
- https://github.com/democratic-csi/democratic-csi/issues/112
|
- https://github.com/democratic-csi/democratic-csi/issues/112
|
||||||
|
|
@ -533,6 +538,28 @@ saveconfig /etc/nvmet/config.json
|
||||||
|
|
||||||
Ensure iscsi manager has been installed and is generally setup/configured. DSM 6.3+ is supported.
|
Ensure iscsi manager has been installed and is generally setup/configured. DSM 6.3+ is supported.
|
||||||
|
|
||||||
|
### objectivefs (objectivefs)
|
||||||
|
|
||||||
|
ObjectiveFS requires the use of an _Admin Key_ to properly automate the
|
||||||
|
lifecycle of filesystems. Each deployment of the driver will point to a single
|
||||||
|
`pool` (bucket) and create individual `filesystems` within that bucket
|
||||||
|
on-demand.
|
||||||
|
|
||||||
|
Ensure the config value used for `pool` is an existing bucket. Be sure the
|
||||||
|
bucket is _NOT_ being used in fs mode (ie: the whole bucket is a single fs).
|
||||||
|
|
||||||
|
The `democratic-csi` `node` container will host the fuse mount process so
|
||||||
|
be careful to only upgrade when all relevant workloads have been drained from
|
||||||
|
the respective node. Also beware that any cpu/memory limits placed on the
|
||||||
|
container by the orchestration system will impact any ability to use the
|
||||||
|
caching, etc features of objectivefs.
|
||||||
|
|
||||||
|
- https://objectivefs.com/howto/csi-driver-objectivefs
|
||||||
|
- https://objectivefs.com/howto/csi-driver-objectivefs-kubernetes-managed
|
||||||
|
- https://objectivefs.com/howto/objectivefs-admin-key-setup
|
||||||
|
- https://objectivefs.com/features#filesystem-pool
|
||||||
|
- https://objectivefs.com/howto/how-to-create-a-filesystem-with-an-existing-empty-bucket
|
||||||
|
|
||||||
## Helm Installation
|
## Helm Installation
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
|
|
@ -647,12 +674,6 @@ Copy the `contrib/freenas-provisioner-to-democratic-csi.sh` script from the
|
||||||
project to your workstation, read the script in detail, and edit the variables
|
project to your workstation, read the script in detail, and edit the variables
|
||||||
to your needs to start migrating!
|
to your needs to start migrating!
|
||||||
|
|
||||||
# Sponsors
|
|
||||||
|
|
||||||
A special shout out to the wonderful sponsors of the project!
|
|
||||||
|
|
||||||
[](http://ixsystems.com/)
|
|
||||||
|
|
||||||
# Related
|
# Related
|
||||||
|
|
||||||
- https://github.com/nmaupu/freenas-provisioner
|
- https://github.com/nmaupu/freenas-provisioner
|
||||||
|
|
|
||||||
|
|
@ -63,6 +63,10 @@ const args = require("yargs")
|
||||||
"1.3.0",
|
"1.3.0",
|
||||||
"1.4.0",
|
"1.4.0",
|
||||||
"1.5.0",
|
"1.5.0",
|
||||||
|
"1.6.0",
|
||||||
|
"1.7.0",
|
||||||
|
"1.8.0",
|
||||||
|
"1.9.0",
|
||||||
],
|
],
|
||||||
})
|
})
|
||||||
.demandOption(["csi-version"], "csi-version is required")
|
.demandOption(["csi-version"], "csi-version is required")
|
||||||
|
|
@ -103,6 +107,7 @@ if (!args.serverSocket && !args.serverAddress && !args.serverPort) {
|
||||||
}
|
}
|
||||||
|
|
||||||
//console.log(args);
|
//console.log(args);
|
||||||
|
//console.log(process.env);
|
||||||
|
|
||||||
const package = require("../package.json");
|
const package = require("../package.json");
|
||||||
args.version = package.version;
|
args.version = package.version;
|
||||||
|
|
@ -135,10 +140,13 @@ const csi = protoDescriptor.csi.v1;
|
||||||
|
|
||||||
logger.info("initializing csi driver: %s", options.driver);
|
logger.info("initializing csi driver: %s", options.driver);
|
||||||
|
|
||||||
|
const { Registry } = require("../src/utils/registry");
|
||||||
|
let globalRegistry = new Registry();
|
||||||
|
|
||||||
let driver;
|
let driver;
|
||||||
try {
|
try {
|
||||||
driver = require("../src/driver/factory").factory(
|
driver = require("../src/driver/factory").factory(
|
||||||
{ logger, args, cache, package, csiVersion },
|
{ logger, args, cache, package, csiVersion, registry: globalRegistry },
|
||||||
options
|
options
|
||||||
);
|
);
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
|
|
@ -397,10 +405,58 @@ logger.info(
|
||||||
bindSocket
|
bindSocket
|
||||||
);
|
);
|
||||||
|
|
||||||
[`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`].forEach(
|
const signalMapping = {
|
||||||
|
1: "SIGHUP",
|
||||||
|
2: "SIGINT",
|
||||||
|
3: "SIGQUIT",
|
||||||
|
4: "SIGILL",
|
||||||
|
5: "SIGTRAP",
|
||||||
|
6: "SIGABRT",
|
||||||
|
7: "SIGEMT",
|
||||||
|
8: "SIGFPE",
|
||||||
|
9: "SIGKILL",
|
||||||
|
10: "SIGBUS",
|
||||||
|
11: "SIGSEGV",
|
||||||
|
12: "SIGSYS",
|
||||||
|
13: "SIGPIPE",
|
||||||
|
14: "SIGALRM",
|
||||||
|
15: "SIGTERM",
|
||||||
|
16: "SIGURG",
|
||||||
|
17: "SIGSTOP",
|
||||||
|
18: "SIGTSTP",
|
||||||
|
19: "SIGCONT",
|
||||||
|
20: "SIGCHLD",
|
||||||
|
21: "SIGTTIN",
|
||||||
|
22: "SIGTTOU",
|
||||||
|
23: "SIGIO",
|
||||||
|
24: "SIGXCPU",
|
||||||
|
25: "SIGXFSZ",
|
||||||
|
26: "SIGVTALRM",
|
||||||
|
27: "SIGPROF",
|
||||||
|
28: "SIGWINCH",
|
||||||
|
29: "SIGINFO",
|
||||||
|
30: "SIGUSR1",
|
||||||
|
31: "SIGUSR2",
|
||||||
|
};
|
||||||
|
|
||||||
|
[(`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`)].forEach(
|
||||||
(eventType) => {
|
(eventType) => {
|
||||||
process.on(eventType, async (code) => {
|
process.on(eventType, async (code) => {
|
||||||
console.log(`running server shutdown, exit code: ${code}`);
|
let codeNumber = null;
|
||||||
|
let codeName = null;
|
||||||
|
if (code > 0) {
|
||||||
|
codeNumber = code;
|
||||||
|
codeName = signalMapping[code];
|
||||||
|
} else {
|
||||||
|
codeNumber = Object.keys(signalMapping).find(
|
||||||
|
(key) => signalMapping[key] === code
|
||||||
|
);
|
||||||
|
codeName = code;
|
||||||
|
}
|
||||||
|
|
||||||
|
console.log(
|
||||||
|
`running server shutdown, exit code: ${codeNumber} (${codeName})`
|
||||||
|
);
|
||||||
|
|
||||||
// attempt clean shutdown of in-flight requests
|
// attempt clean shutdown of in-flight requests
|
||||||
try {
|
try {
|
||||||
|
|
@ -431,7 +487,7 @@ logger.info(
|
||||||
}
|
}
|
||||||
|
|
||||||
console.log("server fully shutdown, exiting");
|
console.log("server fully shutdown, exiting");
|
||||||
process.exit(code);
|
process.exit(codeNumber);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
);
|
);
|
||||||
|
|
|
||||||
|
|
@ -127,6 +127,7 @@ async function main() {
|
||||||
|
|
||||||
for (let csiVolume of csiVolumes) {
|
for (let csiVolume of csiVolumes) {
|
||||||
let volume_id = csiVolume.volume.volume_id;
|
let volume_id = csiVolume.volume.volume_id;
|
||||||
|
let volume_context = JSON.stringify(csiVolume.volume.volume_context) || "Unknown";
|
||||||
//console.log(`processing csi volume ${volume_id}`);
|
//console.log(`processing csi volume ${volume_id}`);
|
||||||
let k8sVolume = k8sVolumes.find((i_k8sVolume) => {
|
let k8sVolume = k8sVolumes.find((i_k8sVolume) => {
|
||||||
let volume_handle = _.get(i_k8sVolume, "spec.csi.volumeHandle", null);
|
let volume_handle = _.get(i_k8sVolume, "spec.csi.volumeHandle", null);
|
||||||
|
|
@ -134,7 +135,7 @@ async function main() {
|
||||||
});
|
});
|
||||||
|
|
||||||
if (!k8sVolume) {
|
if (!k8sVolume) {
|
||||||
console.log(`volume ${volume_id} is NOT in k8s`);
|
console.log(`volume ${volume_id} (${volume_context}) is NOT in k8s`);
|
||||||
if (process.env.DRY_RUN == "1") {
|
if (process.env.DRY_RUN == "1") {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
@ -159,7 +160,7 @@ async function main() {
|
||||||
console.log(`skipping delete of csi volume ${volume_id}`);
|
console.log(`skipping delete of csi volume ${volume_id}`);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
console.log(`volume ${volume_id} is in k8s`);
|
console.log(`volume ${volume_id} (${volume_context}) is in k8s`);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -34,6 +34,7 @@ $exeargs += "-csi.mountdir", "${env:CSI_SANITY_TEMP_DIR}\mnt"
|
||||||
$exeargs += "-csi.stagingdir", "${env:CSI_SANITY_TEMP_DIR}\stage"
|
$exeargs += "-csi.stagingdir", "${env:CSI_SANITY_TEMP_DIR}\stage"
|
||||||
$exeargs += "-csi.testvolumeexpandsize", "2147483648"
|
$exeargs += "-csi.testvolumeexpandsize", "2147483648"
|
||||||
$exeargs += "-csi.testvolumesize", "1073741824"
|
$exeargs += "-csi.testvolumesize", "1073741824"
|
||||||
|
$exeargs += "--csi.secrets", "${env:CSI_SANITY_SECRETS}"
|
||||||
$exeargs += "-ginkgo.skip", "${env:CSI_SANITY_SKIP}"
|
$exeargs += "-ginkgo.skip", "${env:CSI_SANITY_SKIP}"
|
||||||
$exeargs += "-ginkgo.focus", "${env:CSI_SANITY_FOCUS}"
|
$exeargs += "-ginkgo.focus", "${env:CSI_SANITY_FOCUS}"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -7,7 +7,7 @@ set -x
|
||||||
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
|
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
|
||||||
: ${CSI_SANITY_TEMP_DIR:=$(mktemp -d -t ci-csi-sanity-tmp-XXXXXXXX)}
|
: ${CSI_SANITY_TEMP_DIR:=$(mktemp -d -t ci-csi-sanity-tmp-XXXXXXXX)}
|
||||||
|
|
||||||
if [[ ! -S "${CSI_ENDPOINT}" ]];then
|
if [[ ! -S "${CSI_ENDPOINT}" ]]; then
|
||||||
echo "csi socket: ${CSI_ENDPOINT} does not exist"
|
echo "csi socket: ${CSI_ENDPOINT} does not exist"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
@ -15,27 +15,29 @@ fi
|
||||||
trap ctrl_c INT
|
trap ctrl_c INT
|
||||||
|
|
||||||
function ctrl_c() {
|
function ctrl_c() {
|
||||||
echo "Trapped CTRL-C"
|
echo "Trapped CTRL-C"
|
||||||
exit 1
|
exit 1
|
||||||
}
|
}
|
||||||
|
|
||||||
chmod g+w,o+w "${CSI_ENDPOINT}";
|
chmod g+w,o+w "${CSI_ENDPOINT}"
|
||||||
mkdir -p "${CSI_SANITY_TEMP_DIR}";
|
mkdir -p "${CSI_SANITY_TEMP_DIR}"
|
||||||
rm -rf "${CSI_SANITY_TEMP_DIR}"/*;
|
rm -rf "${CSI_SANITY_TEMP_DIR}"/*
|
||||||
chmod -R 777 "${CSI_SANITY_TEMP_DIR}";
|
chmod -R 777 "${CSI_SANITY_TEMP_DIR}"
|
||||||
|
|
||||||
# https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity
|
# https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity
|
||||||
# FOR DEBUG: --ginkgo.v
|
# FOR DEBUG: --ginkgo.v
|
||||||
# --csi.secrets=<path to secrets file>
|
# --csi.secrets=<path to secrets file>
|
||||||
|
#
|
||||||
# expand size 2073741824 to have mis-alignments
|
# expand size 2073741824 to have mis-alignments
|
||||||
# expand size 2147483648 to have everything line up nicely
|
# expand size 2147483648 to have everything line up nicely
|
||||||
|
|
||||||
csi-sanity --csi.endpoint "unix://${CSI_ENDPOINT}" \
|
csi-sanity --csi.endpoint "unix://${CSI_ENDPOINT}" \
|
||||||
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
|
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
|
||||||
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
|
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
|
||||||
--csi.testvolumeexpandsize 2147483648 \
|
--csi.testvolumeexpandsize 2147483648 \
|
||||||
--csi.testvolumesize 1073741824 \
|
--csi.testvolumesize 1073741824 \
|
||||||
-ginkgo.skip "${CSI_SANITY_SKIP}" \
|
--csi.secrets="${CSI_SANITY_SECRETS}" \
|
||||||
-ginkgo.focus "${CSI_SANITY_FOCUS}"
|
-ginkgo.skip "${CSI_SANITY_SKIP}" \
|
||||||
|
-ginkgo.focus "${CSI_SANITY_FOCUS}"
|
||||||
|
|
||||||
rm -rf "${CSI_SANITY_TEMP_DIR}"
|
rm -rf "${CSI_SANITY_TEMP_DIR}"
|
||||||
|
|
|
||||||
|
|
@ -8,7 +8,7 @@ Set-Location $env:PWD
|
||||||
Write-Output "launching server"
|
Write-Output "launching server"
|
||||||
|
|
||||||
$env:LOG_LEVEL = "debug"
|
$env:LOG_LEVEL = "debug"
|
||||||
$env:CSI_VERSION = "1.5.0"
|
$env:CSI_VERSION = "1.9.0"
|
||||||
$env:CSI_NAME = "driver-test"
|
$env:CSI_NAME = "driver-test"
|
||||||
$env:CSI_SANITY = "1"
|
$env:CSI_SANITY = "1"
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -9,19 +9,19 @@ echo "current launch-server PATH: ${PATH}"
|
||||||
: ${CI_BUILD_KEY:="local"}
|
: ${CI_BUILD_KEY:="local"}
|
||||||
: ${TEMPLATE_CONFIG_FILE:=${1}}
|
: ${TEMPLATE_CONFIG_FILE:=${1}}
|
||||||
: ${CSI_MODE:=""}
|
: ${CSI_MODE:=""}
|
||||||
: ${CSI_VERSION:="1.5.0"}
|
: ${CSI_VERSION:="1.9.0"}
|
||||||
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
|
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
|
||||||
: ${LOG_PATH:=/tmp/csi-${CI_BUILD_KEY}.log}
|
: ${LOG_PATH:=/tmp/csi-${CI_BUILD_KEY}.log}
|
||||||
|
|
||||||
if [[ "x${CONFIG_FILE}" == "x" ]];then
|
if [[ "x${CONFIG_FILE}" == "x" ]]; then
|
||||||
: ${CONFIG_FILE:=/tmp/csi-config-${CI_BUILD_KEY}.yaml}
|
: ${CONFIG_FILE:=/tmp/csi-config-${CI_BUILD_KEY}.yaml}
|
||||||
|
|
||||||
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]];then
|
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]]; then
|
||||||
envsubst < "${TEMPLATE_CONFIG_FILE}" > "${CONFIG_FILE}"
|
envsubst <"${TEMPLATE_CONFIG_FILE}" >"${CONFIG_FILE}"
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "x${CSI_MODE}" != "x" ]];then
|
if [[ "x${CSI_MODE}" != "x" ]]; then
|
||||||
EXTRA_ARGS="--csi-mode ${CSI_MODE} ${EXTRA_ARGS}"
|
EXTRA_ARGS="--csi-mode ${CSI_MODE} ${EXTRA_ARGS}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,20 @@
|
||||||
|
driver: objectivefs
|
||||||
|
|
||||||
|
objectivefs:
|
||||||
|
pool: ${OBJECTIVEFS_POOL}
|
||||||
|
cli:
|
||||||
|
sudoEnabled: false
|
||||||
|
env:
|
||||||
|
OBJECTIVEFS_LICENSE: ${OBJECTIVEFS_LICENSE}
|
||||||
|
OBJECTSTORE: ${OBJECTIVEFS_OBJECTSTORE}
|
||||||
|
ENDPOINT: ${OBJECTIVEFS_ENDPOINT_PROTOCOL}://${OBJECTIVEFS_ENDPOINT_HOST}:${OBJECTIVEFS_ENDPOINT_PORT}
|
||||||
|
SECRET_KEY: ${OBJECTIVEFS_SECRET_KEY}
|
||||||
|
ACCESS_KEY: ${OBJECTIVEFS_ACCESS_KEY}
|
||||||
|
OBJECTIVEFS_PASSPHRASE: ${OBJECTIVEFS_PASSPHRASE}
|
||||||
|
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
# max volume name length is 63
|
||||||
|
strategy: crc32
|
||||||
|
|
@ -29,3 +29,10 @@ iscsi:
|
||||||
targetGroupAuthGroup:
|
targetGroupAuthGroup:
|
||||||
# 0-100 (0 == ignore)
|
# 0-100 (0 == ignore)
|
||||||
extentAvailThreshold: 0
|
extentAvailThreshold: 0
|
||||||
|
|
||||||
|
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
strategy: crc16
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,38 @@
|
||||||
|
driver: freenas-api-iscsi
|
||||||
|
|
||||||
|
httpConnection:
|
||||||
|
protocol: http
|
||||||
|
host: ${TRUENAS_HOST}
|
||||||
|
port: 80
|
||||||
|
#apiKey:
|
||||||
|
username: ${TRUENAS_USERNAME}
|
||||||
|
password: ${TRUENAS_PASSWORD}
|
||||||
|
|
||||||
|
zfs:
|
||||||
|
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||||
|
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||||
|
|
||||||
|
zvolCompression:
|
||||||
|
zvolDedup:
|
||||||
|
zvolEnableReservation: false
|
||||||
|
zvolBlocksize:
|
||||||
|
|
||||||
|
iscsi:
|
||||||
|
targetPortal: ${TRUENAS_HOST}
|
||||||
|
interface: ""
|
||||||
|
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||||
|
nameSuffix: ""
|
||||||
|
targetGroups:
|
||||||
|
- targetGroupPortalGroup: 1
|
||||||
|
targetGroupInitiatorGroup: 1
|
||||||
|
targetGroupAuthType: None
|
||||||
|
targetGroupAuthGroup:
|
||||||
|
# 0-100 (0 == ignore)
|
||||||
|
extentAvailThreshold: 0
|
||||||
|
|
||||||
|
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
strategy: crc16
|
||||||
|
|
@ -0,0 +1,29 @@
|
||||||
|
driver: freenas-api-nfs
|
||||||
|
|
||||||
|
httpConnection:
|
||||||
|
protocol: http
|
||||||
|
host: ${TRUENAS_HOST}
|
||||||
|
port: 80
|
||||||
|
#apiKey:
|
||||||
|
username: ${TRUENAS_USERNAME}
|
||||||
|
password: ${TRUENAS_PASSWORD}
|
||||||
|
|
||||||
|
zfs:
|
||||||
|
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||||
|
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||||
|
|
||||||
|
datasetEnableQuotas: true
|
||||||
|
datasetEnableReservation: false
|
||||||
|
datasetPermissionsMode: "0777"
|
||||||
|
datasetPermissionsUser: 0
|
||||||
|
datasetPermissionsGroup: 0
|
||||||
|
|
||||||
|
nfs:
|
||||||
|
shareHost: ${TRUENAS_HOST}
|
||||||
|
shareAlldirs: false
|
||||||
|
shareAllowedHosts: []
|
||||||
|
shareAllowedNetworks: []
|
||||||
|
shareMaprootUser: root
|
||||||
|
shareMaprootGroup: root
|
||||||
|
shareMapallUser: ""
|
||||||
|
shareMapallGroup: ""
|
||||||
|
|
@ -0,0 +1,50 @@
|
||||||
|
driver: freenas-api-smb
|
||||||
|
|
||||||
|
httpConnection:
|
||||||
|
protocol: http
|
||||||
|
host: ${TRUENAS_HOST}
|
||||||
|
port: 80
|
||||||
|
#apiKey:
|
||||||
|
username: ${TRUENAS_USERNAME}
|
||||||
|
password: ${TRUENAS_PASSWORD}
|
||||||
|
|
||||||
|
zfs:
|
||||||
|
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||||
|
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||||
|
|
||||||
|
datasetEnableQuotas: true
|
||||||
|
datasetEnableReservation: false
|
||||||
|
datasetPermissionsMode: "0770"
|
||||||
|
datasetPermissionsUser: 1001
|
||||||
|
datasetPermissionsGroup: 1001
|
||||||
|
|
||||||
|
smb:
|
||||||
|
shareHost: ${TRUENAS_HOST}
|
||||||
|
#nameTemplate: ""
|
||||||
|
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||||
|
nameSuffix: ""
|
||||||
|
shareAuxiliaryConfigurationTemplate: |
|
||||||
|
#guest ok = yes
|
||||||
|
#guest only = yes
|
||||||
|
shareHome: false
|
||||||
|
shareAllowedHosts: []
|
||||||
|
shareDeniedHosts: []
|
||||||
|
#shareDefaultPermissions: true
|
||||||
|
shareGuestOk: false
|
||||||
|
#shareGuestOnly: true
|
||||||
|
#shareShowHiddenFiles: true
|
||||||
|
shareRecycleBin: false
|
||||||
|
shareBrowsable: false
|
||||||
|
shareAccessBasedEnumeration: true
|
||||||
|
shareTimeMachine: false
|
||||||
|
#shareStorageTask:
|
||||||
|
|
||||||
|
node:
|
||||||
|
mount:
|
||||||
|
mount_flags: "username=smbroot,password=smbroot"
|
||||||
|
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
strategy: crc16
|
||||||
|
|
@ -0,0 +1,38 @@
|
||||||
|
driver: freenas-api-iscsi
|
||||||
|
|
||||||
|
httpConnection:
|
||||||
|
protocol: http
|
||||||
|
host: ${TRUENAS_HOST}
|
||||||
|
port: 80
|
||||||
|
#apiKey:
|
||||||
|
username: ${TRUENAS_USERNAME}
|
||||||
|
password: ${TRUENAS_PASSWORD}
|
||||||
|
|
||||||
|
zfs:
|
||||||
|
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||||
|
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||||
|
|
||||||
|
zvolCompression:
|
||||||
|
zvolDedup:
|
||||||
|
zvolEnableReservation: false
|
||||||
|
zvolBlocksize:
|
||||||
|
|
||||||
|
iscsi:
|
||||||
|
targetPortal: ${TRUENAS_HOST}
|
||||||
|
interface: ""
|
||||||
|
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||||
|
nameSuffix: ""
|
||||||
|
targetGroups:
|
||||||
|
- targetGroupPortalGroup: 1
|
||||||
|
targetGroupInitiatorGroup: 1
|
||||||
|
targetGroupAuthType: None
|
||||||
|
targetGroupAuthGroup:
|
||||||
|
# 0-100 (0 == ignore)
|
||||||
|
extentAvailThreshold: 0
|
||||||
|
|
||||||
|
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
strategy: crc16
|
||||||
|
|
@ -0,0 +1,29 @@
|
||||||
|
driver: freenas-api-nfs
|
||||||
|
|
||||||
|
httpConnection:
|
||||||
|
protocol: http
|
||||||
|
host: ${TRUENAS_HOST}
|
||||||
|
port: 80
|
||||||
|
#apiKey:
|
||||||
|
username: ${TRUENAS_USERNAME}
|
||||||
|
password: ${TRUENAS_PASSWORD}
|
||||||
|
|
||||||
|
zfs:
|
||||||
|
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||||
|
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||||
|
|
||||||
|
datasetEnableQuotas: true
|
||||||
|
datasetEnableReservation: false
|
||||||
|
datasetPermissionsMode: "0777"
|
||||||
|
datasetPermissionsUser: 0
|
||||||
|
datasetPermissionsGroup: 0
|
||||||
|
|
||||||
|
nfs:
|
||||||
|
shareHost: ${TRUENAS_HOST}
|
||||||
|
shareAlldirs: false
|
||||||
|
shareAllowedHosts: []
|
||||||
|
shareAllowedNetworks: []
|
||||||
|
shareMaprootUser: root
|
||||||
|
shareMaprootGroup: root
|
||||||
|
shareMapallUser: ""
|
||||||
|
shareMapallGroup: ""
|
||||||
|
|
@ -0,0 +1,50 @@
|
||||||
|
driver: freenas-api-smb
|
||||||
|
|
||||||
|
httpConnection:
|
||||||
|
protocol: http
|
||||||
|
host: ${TRUENAS_HOST}
|
||||||
|
port: 80
|
||||||
|
#apiKey:
|
||||||
|
username: ${TRUENAS_USERNAME}
|
||||||
|
password: ${TRUENAS_PASSWORD}
|
||||||
|
|
||||||
|
zfs:
|
||||||
|
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||||
|
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||||
|
|
||||||
|
datasetEnableQuotas: true
|
||||||
|
datasetEnableReservation: false
|
||||||
|
datasetPermissionsMode: "0770"
|
||||||
|
datasetPermissionsUser: 1001
|
||||||
|
datasetPermissionsGroup: 1001
|
||||||
|
|
||||||
|
smb:
|
||||||
|
shareHost: ${TRUENAS_HOST}
|
||||||
|
#nameTemplate: ""
|
||||||
|
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||||
|
nameSuffix: ""
|
||||||
|
shareAuxiliaryConfigurationTemplate: |
|
||||||
|
#guest ok = yes
|
||||||
|
#guest only = yes
|
||||||
|
shareHome: false
|
||||||
|
shareAllowedHosts: []
|
||||||
|
shareDeniedHosts: []
|
||||||
|
#shareDefaultPermissions: true
|
||||||
|
shareGuestOk: false
|
||||||
|
#shareGuestOnly: true
|
||||||
|
#shareShowHiddenFiles: true
|
||||||
|
shareRecycleBin: false
|
||||||
|
shareBrowsable: false
|
||||||
|
shareAccessBasedEnumeration: true
|
||||||
|
shareTimeMachine: false
|
||||||
|
#shareStorageTask:
|
||||||
|
|
||||||
|
node:
|
||||||
|
mount:
|
||||||
|
mount_flags: "username=smbroot,password=smbroot"
|
||||||
|
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
strategy: crc16
|
||||||
|
|
@ -43,19 +43,19 @@ if [[ ! -f ${PV_ORIG_FILE} ]]; then
|
||||||
kubectl get pv "${PV}" -o yaml >"${PV_ORIG_FILE}"
|
kubectl get pv "${PV}" -o yaml >"${PV_ORIG_FILE}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
reclaimPolicy=$(yq eval '.spec.persistentVolumeReclaimPolicy' "${PV_ORIG_FILE}")
|
reclaimPolicy=$(yq '.spec.persistentVolumeReclaimPolicy' "${PV_ORIG_FILE}")
|
||||||
|
|
||||||
# copy file for editing
|
# copy file for editing
|
||||||
cp "${PV_ORIG_FILE}" "${PV_TMP_FILE}"
|
cp "${PV_ORIG_FILE}" "${PV_TMP_FILE}"
|
||||||
|
|
||||||
# pre-process before edit
|
# pre-process before edit
|
||||||
yq -i eval 'del(.metadata.resourceVersion)' "${PV_TMP_FILE}"
|
yq -i -y 'del(.metadata.resourceVersion)' "${PV_TMP_FILE}"
|
||||||
|
|
||||||
# manually edit
|
# manually edit
|
||||||
${EDITOR} "${PV_TMP_FILE}"
|
${EDITOR} "${PV_TMP_FILE}"
|
||||||
|
|
||||||
# ask if looks good
|
# ask if looks good
|
||||||
yq eval '.' "${PV_TMP_FILE}"
|
yq '.' "${PV_TMP_FILE}"
|
||||||
yes_or_no "Would you like to delete the existing PV object and recreate with the above data?"
|
yes_or_no "Would you like to delete the existing PV object and recreate with the above data?"
|
||||||
|
|
||||||
# set relaim to Retain on PV
|
# set relaim to Retain on PV
|
||||||
|
|
@ -69,7 +69,7 @@ kubectl patch pv "${PV}" -p '{"metadata":{"finalizers": null }}' &>/dev/null ||
|
||||||
kubectl apply -f "${PV_TMP_FILE}"
|
kubectl apply -f "${PV_TMP_FILE}"
|
||||||
|
|
||||||
# restore original reclaim value
|
# restore original reclaim value
|
||||||
kubectl patch pv "${PV}" -p "{\"spec\":{\"persistentVolumeReclaimPolicy\":\"${reclaimPolicy}\"}}"
|
kubectl patch pv "${PV}" -p "{\"spec\":{\"persistentVolumeReclaimPolicy\":${reclaimPolicy}}}"
|
||||||
|
|
||||||
# spit out any zfs properties updates
|
# spit out any zfs properties updates
|
||||||
yes_or_no "Would you like to delete the PV backup file?" && {
|
yes_or_no "Would you like to delete the PV backup file?" && {
|
||||||
|
|
|
||||||
|
|
@ -17,25 +17,60 @@ SCRIPTDIR="$(
|
||||||
cd "${SCRIPTDIR}"
|
cd "${SCRIPTDIR}"
|
||||||
|
|
||||||
: "${NVMETCONFIG:="${SCRIPTDIR}/nvmet-config.json"}"
|
: "${NVMETCONFIG:="${SCRIPTDIR}/nvmet-config.json"}"
|
||||||
|
: "${NVMETVENV:="${SCRIPTDIR}/nvmet-venv"}"
|
||||||
|
|
||||||
export PATH=${HOME}/.local/bin:${PATH}
|
export PATH=${HOME}/.local/bin:${PATH}
|
||||||
|
|
||||||
modules=()
|
main() {
|
||||||
modules+=("nvmet")
|
|
||||||
modules+=("nvmet-fc")
|
|
||||||
modules+=("nvmet-rdma")
|
|
||||||
modules+=("nvmet-tcp")
|
|
||||||
|
|
||||||
for module in "${modules[@]}"; do
|
kernel_modules
|
||||||
modprobe "${module}"
|
nvmetcli ls &>/dev/null || {
|
||||||
done
|
setup_venv
|
||||||
|
install_nvmetcli
|
||||||
which nvmetcli &>/dev/null || {
|
|
||||||
which pip &>/dev/null || {
|
|
||||||
wget -O get-pip.py https://bootstrap.pypa.io/get-pip.py
|
|
||||||
python get-pip.py --user
|
|
||||||
rm get-pip.py
|
|
||||||
}
|
}
|
||||||
|
nvmetcli_restore
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
kernel_modules() {
|
||||||
|
|
||||||
|
modules=()
|
||||||
|
modules+=("nvmet")
|
||||||
|
modules+=("nvmet-fc")
|
||||||
|
modules+=("nvmet-rdma")
|
||||||
|
modules+=("nvmet-tcp")
|
||||||
|
|
||||||
|
for module in "${modules[@]}"; do
|
||||||
|
modprobe "${module}"
|
||||||
|
done
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
setup_venv() {
|
||||||
|
|
||||||
|
rm -rf ${NVMETVENV}
|
||||||
|
python -m venv ${NVMETVENV} --without-pip --system-site-packages
|
||||||
|
activate_venv
|
||||||
|
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
|
||||||
|
python get-pip.py
|
||||||
|
rm get-pip.py
|
||||||
|
deactivate_venv
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
activate_venv() {
|
||||||
|
|
||||||
|
. ${NVMETVENV}/bin/activate
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
deactivate_venv() {
|
||||||
|
|
||||||
|
deactivate
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
install_nvmetcli() {
|
||||||
|
|
||||||
if [[ ! -d nvmetcli ]]; then
|
if [[ ! -d nvmetcli ]]; then
|
||||||
git clone git://git.infradead.org/users/hch/nvmetcli.git
|
git clone git://git.infradead.org/users/hch/nvmetcli.git
|
||||||
|
|
@ -43,19 +78,31 @@ which nvmetcli &>/dev/null || {
|
||||||
|
|
||||||
cd nvmetcli
|
cd nvmetcli
|
||||||
|
|
||||||
# install to root home dir
|
activate_venv
|
||||||
python3 setup.py install --user
|
|
||||||
|
|
||||||
# install to root home dir
|
# install to root home dir
|
||||||
pip install configshell_fb --user
|
python3 setup.py install --install-scripts=${HOME}/.local/bin
|
||||||
|
|
||||||
|
# install to root home dir
|
||||||
|
pip install configshell_fb==1.1.30
|
||||||
|
|
||||||
# remove source
|
# remove source
|
||||||
cd "${SCRIPTDIR}"
|
cd "${SCRIPTDIR}"
|
||||||
rm -rf nvmetcli
|
rm -rf nvmetcli
|
||||||
|
|
||||||
|
deactivate_venv
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
cd "${SCRIPTDIR}"
|
nvmetcli_restore() {
|
||||||
nvmetcli restore "${NVMETCONFIG}"
|
|
||||||
|
|
||||||
touch /var/run/nvmet-config-loaded
|
activate_venv
|
||||||
chmod +r /var/run/nvmet-config-loaded
|
cd "${SCRIPTDIR}"
|
||||||
|
nvmetcli restore "${NVMETCONFIG}"
|
||||||
|
deactivate_venv
|
||||||
|
touch /var/run/nvmet-config-loaded
|
||||||
|
chmod +r /var/run/nvmet-config-loaded
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
|
@ -0,0 +1,6 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# v1.6.0
|
||||||
|
VERSION=${1}
|
||||||
|
|
||||||
|
curl -v -o "csi-${VERSION}.proto" https://raw.githubusercontent.com/container-storage-interface/spec/${VERSION}/csi.proto
|
||||||
|
|
@ -13,7 +13,7 @@ case ${ISCSIADM_HOST_STRATEGY} in
|
||||||
|
|
||||||
nsenter)
|
nsenter)
|
||||||
# https://github.com/siderolabs/extensions/issues/38#issuecomment-1125403043
|
# https://github.com/siderolabs/extensions/issues/38#issuecomment-1125403043
|
||||||
iscsid_pid=$(pgrep iscsid)
|
iscsid_pid=$(pgrep --exact --oldest iscsid)
|
||||||
if [[ "${iscsid_pid}x" == "x" ]]; then
|
if [[ "${iscsid_pid}x" == "x" ]]; then
|
||||||
echoerr "failed to find iscsid pid for nsenter"
|
echoerr "failed to find iscsid pid for nsenter"
|
||||||
exit 1
|
exit 1
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,36 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -x
|
||||||
|
|
||||||
|
PLATFORM_TYPE=${1}
|
||||||
|
|
||||||
|
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
|
||||||
|
PLATFORM=$BUILDPLATFORM
|
||||||
|
else
|
||||||
|
PLATFORM=$TARGETPLATFORM
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "x${PLATFORM}" == "x" ]]; then
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
|
||||||
|
if [ "$PLATFORM" = "linux/amd64" ]; then
|
||||||
|
export PLATFORM_ARCH="amd64"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm64" ]; then
|
||||||
|
export PLATFORM_ARCH="arm64"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
|
||||||
|
export PLATFORM_ARCH="armhf"
|
||||||
|
else
|
||||||
|
echo "unsupported/unknown kopia PLATFORM ${PLATFORM}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "I am installing kopia $KOPIA_VERSION"
|
||||||
|
|
||||||
|
export DEB_FILE="kopia.deb"
|
||||||
|
wget -O "${DEB_FILE}" "https://github.com/kopia/kopia/releases/download/v${KOPIA_VERSION}/kopia_${KOPIA_VERSION}_linux_${PLATFORM_ARCH}.deb"
|
||||||
|
dpkg -i "${DEB_FILE}"
|
||||||
|
|
||||||
|
rm "${DEB_FILE}"
|
||||||
|
|
@ -0,0 +1,40 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -x
|
||||||
|
|
||||||
|
if [[ -z "${OBJECTIVEFS_DOWNLOAD_ID}" ]]; then
|
||||||
|
echo 'missing OBJECTIVEFS_DOWNLOAD_ID, moving on'
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
PLATFORM_TYPE=${1}
|
||||||
|
|
||||||
|
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
|
||||||
|
PLATFORM=$BUILDPLATFORM
|
||||||
|
else
|
||||||
|
PLATFORM=$TARGETPLATFORM
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "x${PLATFORM}" == "x" ]]; then
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
|
||||||
|
if [ "$PLATFORM" = "linux/amd64" ]; then
|
||||||
|
export OBJECTIVEFS_ARCH="amd64"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm64" ]; then
|
||||||
|
export OBJECTIVEFS_ARCH="arm64"
|
||||||
|
else
|
||||||
|
echo "unsupported/unknown PLATFORM ${PLATFORM}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
export DEB_FILE="objectivefs_${OBJECTIVEFS_VERSION}_${OBJECTIVEFS_ARCH}.deb"
|
||||||
|
|
||||||
|
echo "I am installing objectivefs $OBJECTIVEFS_VERSION"
|
||||||
|
|
||||||
|
wget "https://objectivefs.com/user/download/${OBJECTIVEFS_DOWNLOAD_ID}/${DEB_FILE}"
|
||||||
|
dpkg -i "${DEB_FILE}"
|
||||||
|
|
||||||
|
rm "${DEB_FILE}"
|
||||||
|
|
@ -0,0 +1,41 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -x
|
||||||
|
|
||||||
|
PLATFORM_TYPE=${1}
|
||||||
|
|
||||||
|
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
|
||||||
|
PLATFORM=$BUILDPLATFORM
|
||||||
|
else
|
||||||
|
PLATFORM=$TARGETPLATFORM
|
||||||
|
fi
|
||||||
|
|
||||||
|
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
|
||||||
|
if [[ "x${PLATFORM}" == "x" ]]; then
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
|
||||||
|
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
|
||||||
|
if [ "$PLATFORM" = "linux/amd64" ]; then
|
||||||
|
export PLATFORM_ARCH="amd64"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm64" ]; then
|
||||||
|
export PLATFORM_ARCH="arm"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
|
||||||
|
export PLATFORM_ARCH="arm-v7"
|
||||||
|
else
|
||||||
|
echo "unsupported/unknown restic PLATFORM ${PLATFORM}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "I am installing rclone $RCLONE_VERSION"
|
||||||
|
|
||||||
|
export ZIP_FILE="rclone.zip"
|
||||||
|
wget -O "${ZIP_FILE}" "https://github.com/rclone/rclone/releases/download/v${RCLONE_VERSION}/rclone-v${RCLONE_VERSION}-linux-${PLATFORM_ARCH}.zip"
|
||||||
|
unzip "${ZIP_FILE}"
|
||||||
|
|
||||||
|
mv rclone-*-linux-*/rclone /usr/local/bin/rclone
|
||||||
|
rm -rf rclone-*-linux-*
|
||||||
|
chown root:root /usr/local/bin/rclone
|
||||||
|
chmod +x /usr/local/bin/rclone
|
||||||
|
|
@ -0,0 +1,42 @@
|
||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
set -e
|
||||||
|
set -x
|
||||||
|
|
||||||
|
PLATFORM_TYPE=${1}
|
||||||
|
|
||||||
|
if [[ "${PLATFORM_TYPE}" == "build" ]]; then
|
||||||
|
PLATFORM=$BUILDPLATFORM
|
||||||
|
else
|
||||||
|
PLATFORM=$TARGETPLATFORM
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ "x${PLATFORM}" == "x" ]]; then
|
||||||
|
PLATFORM="linux/amd64"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
|
||||||
|
# linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
|
||||||
|
if [ "$PLATFORM" = "linux/amd64" ]; then
|
||||||
|
export PLATFORM_ARCH="amd64"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm64" ]; then
|
||||||
|
export PLATFORM_ARCH="arm64"
|
||||||
|
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
|
||||||
|
export PLATFORM_ARCH="arm"
|
||||||
|
elif [ "$PLATFORM" = "linux/s390x" ]; then
|
||||||
|
export PLATFORM_ARCH="s390x"
|
||||||
|
elif [ "$PLATFORM" = "linux/ppc64le" ]; then
|
||||||
|
export PLATFORM_ARCH="ppc64le"
|
||||||
|
else
|
||||||
|
echo "unsupported/unknown restic PLATFORM ${PLATFORM}"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "I am installing restic $RESTIC_VERSION"
|
||||||
|
|
||||||
|
export TAR_FILE="restic.bz2"
|
||||||
|
wget -O "${TAR_FILE}" "https://github.com/restic/restic/releases/download/v${RESTIC_VERSION}/restic_${RESTIC_VERSION}_linux_${PLATFORM_ARCH}.bz2"
|
||||||
|
bunzip2 "${TAR_FILE}"
|
||||||
|
mv restic /usr/local/bin
|
||||||
|
chown root:root /usr/local/bin/restic
|
||||||
|
chmod +x /usr/local/bin/restic
|
||||||
|
|
@ -11,6 +11,10 @@ job "democratic-csi-iscsi-node" {
|
||||||
|
|
||||||
env {
|
env {
|
||||||
CSI_NODE_ID = "${attr.unique.hostname}"
|
CSI_NODE_ID = "${attr.unique.hostname}"
|
||||||
|
|
||||||
|
# if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
|
||||||
|
# you can configure the fs detection system used with the following envvar:
|
||||||
|
#FILESYSTEM_TYPE_DETECTION_STRATEGY = "blkid"
|
||||||
}
|
}
|
||||||
|
|
||||||
config {
|
config {
|
||||||
|
|
@ -38,6 +42,15 @@ job "democratic-csi-iscsi-node" {
|
||||||
source = "/"
|
source = "/"
|
||||||
readonly=false
|
readonly=false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
# if you run into a scenario where your iscsi volumes are zeroed each time they are mounted,
|
||||||
|
# you can try uncommenting the following additional mount block:
|
||||||
|
#mount {
|
||||||
|
# type = "bind"
|
||||||
|
# target = "/run/udev"
|
||||||
|
# source = "/run/udev"
|
||||||
|
# readonly = true
|
||||||
|
#}
|
||||||
}
|
}
|
||||||
|
|
||||||
template {
|
template {
|
||||||
|
|
|
||||||
|
|
@ -6,17 +6,44 @@ job "democratic-csi-nfs-controller" {
|
||||||
driver = "docker"
|
driver = "docker"
|
||||||
|
|
||||||
config {
|
config {
|
||||||
image = "docker.io/democraticcsi/democratic-csi:latest"
|
image = "docker.io/democraticcsi/democratic-csi:${var.version}"
|
||||||
|
|
||||||
args = [
|
entrypoint = [
|
||||||
"--csi-version=1.5.0",
|
"${NOMAD_TASK_DIR}/init.sh"
|
||||||
# must match the csi_plugin.id attribute below
|
|
||||||
"--csi-name=org.democratic-csi.nfs",
|
|
||||||
"--driver-config-file=${NOMAD_TASK_DIR}/driver-config-file.yaml",
|
|
||||||
"--log-level=info",
|
|
||||||
"--csi-mode=controller",
|
|
||||||
"--server-socket=/csi/csi.sock",
|
|
||||||
]
|
]
|
||||||
|
|
||||||
|
network_mode = "host"
|
||||||
|
privileged = true
|
||||||
|
}
|
||||||
|
|
||||||
|
env {
|
||||||
|
NFS_SERVER = "<nfs server>"
|
||||||
|
NFS_SHARE = "<nfs share>"
|
||||||
|
}
|
||||||
|
|
||||||
|
# The nfs share is mounted in the controller so it can create the volumes
|
||||||
|
# sub directories inside the nfs share
|
||||||
|
template {
|
||||||
|
destination = "${NOMAD_TASK_DIR}/init.sh"
|
||||||
|
perms = "755"
|
||||||
|
|
||||||
|
data = <<-EOT
|
||||||
|
#!/bin/sh
|
||||||
|
|
||||||
|
if [ ! -d /storage ]; then
|
||||||
|
mkdir -p /storage
|
||||||
|
fi
|
||||||
|
|
||||||
|
mount "{{ env "NFS_SERVER" }}:{{ env "NFS_SHARE" }}" /storage
|
||||||
|
|
||||||
|
exec ./bin/democratic-csi \
|
||||||
|
--csi-version=1.5.0 \
|
||||||
|
--csi-name=org.democratic-csi.nfs \
|
||||||
|
--driver-config-file={{ env "NOMAD_TASK_DIR" }}/driver-config-file.yaml \
|
||||||
|
--log-level=info \
|
||||||
|
--csi-mode=controller \
|
||||||
|
--server-socket=/csi/csi.sock
|
||||||
|
EOT
|
||||||
}
|
}
|
||||||
|
|
||||||
template {
|
template {
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,6 @@
|
||||||
|
# common options for the controller service
|
||||||
|
|
||||||
|
csi:
|
||||||
|
# manual override of the available access modes for the deployment
|
||||||
|
# generally highly uncessary to alter so only use in advanced scenarios
|
||||||
|
#access_modes: []
|
||||||
|
|
@ -42,6 +42,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/b/vols
|
datasetParentName: tank/k8s/b/vols
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
|
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
|
||||||
# "" (inherit), lz4, gzip-9, etc
|
# "" (inherit), lz4, gzip-9, etc
|
||||||
zvolCompression:
|
zvolCompression:
|
||||||
|
|
@ -67,6 +68,8 @@ iscsi:
|
||||||
# add as many as needed
|
# add as many as needed
|
||||||
targetGroups:
|
targetGroups:
|
||||||
# get the correct ID from the "portal" section in the UI
|
# get the correct ID from the "portal" section in the UI
|
||||||
|
# https://github.com/democratic-csi/democratic-csi/issues/302
|
||||||
|
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
|
||||||
- targetGroupPortalGroup: 1
|
- targetGroupPortalGroup: 1
|
||||||
# get the correct ID from the "initiators" section in the UI
|
# get the correct ID from the "initiators" section in the UI
|
||||||
targetGroupInitiatorGroup: 1
|
targetGroupInitiatorGroup: 1
|
||||||
|
|
|
||||||
|
|
@ -37,6 +37,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/a/vols
|
datasetParentName: tank/k8s/a/vols
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
||||||
datasetEnableQuotas: true
|
datasetEnableQuotas: true
|
||||||
datasetEnableReservation: false
|
datasetEnableReservation: false
|
||||||
|
|
|
||||||
|
|
@ -42,6 +42,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/a/vols
|
datasetParentName: tank/k8s/a/vols
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
||||||
datasetEnableQuotas: true
|
datasetEnableQuotas: true
|
||||||
datasetEnableReservation: false
|
datasetEnableReservation: false
|
||||||
|
|
|
||||||
|
|
@ -51,6 +51,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/b/vols
|
datasetParentName: tank/k8s/b/vols
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
|
detachedSnapshotsDatasetParentName: tanks/k8s/b/snaps
|
||||||
# "" (inherit), lz4, gzip-9, etc
|
# "" (inherit), lz4, gzip-9, etc
|
||||||
zvolCompression:
|
zvolCompression:
|
||||||
|
|
@ -76,6 +77,8 @@ iscsi:
|
||||||
# add as many as needed
|
# add as many as needed
|
||||||
targetGroups:
|
targetGroups:
|
||||||
# get the correct ID from the "portal" section in the UI
|
# get the correct ID from the "portal" section in the UI
|
||||||
|
# https://github.com/democratic-csi/democratic-csi/issues/302
|
||||||
|
# NOTE: the ID in the UI does NOT always match the ID in the DB, you must use the DB value
|
||||||
- targetGroupPortalGroup: 1
|
- targetGroupPortalGroup: 1
|
||||||
# get the correct ID from the "initiators" section in the UI
|
# get the correct ID from the "initiators" section in the UI
|
||||||
targetGroupInitiatorGroup: 1
|
targetGroupInitiatorGroup: 1
|
||||||
|
|
|
||||||
|
|
@ -47,6 +47,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/a/vols
|
datasetParentName: tank/k8s/a/vols
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
||||||
datasetEnableQuotas: true
|
datasetEnableQuotas: true
|
||||||
datasetEnableReservation: false
|
datasetEnableReservation: false
|
||||||
|
|
|
||||||
|
|
@ -53,6 +53,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/a/vols
|
datasetParentName: tank/k8s/a/vols
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
||||||
datasetEnableQuotas: true
|
datasetEnableQuotas: true
|
||||||
datasetEnableReservation: false
|
datasetEnableReservation: false
|
||||||
|
|
|
||||||
|
|
@ -3,8 +3,57 @@ instance_id:
|
||||||
local-hostpath:
|
local-hostpath:
|
||||||
# generally shareBasePath and controllerBasePath should be the same for this
|
# generally shareBasePath and controllerBasePath should be the same for this
|
||||||
# driver, this path should be mounted into the csi-driver container
|
# driver, this path should be mounted into the csi-driver container
|
||||||
shareBasePath: "/var/lib/csi-local-hostpath"
|
shareBasePath: "/var/lib/csi-local-hostpath"
|
||||||
controllerBasePath: "/var/lib/csi-local-hostpath"
|
controllerBasePath: "/var/lib/csi-local-hostpath"
|
||||||
dirPermissionsMode: "0777"
|
dirPermissionsMode: "0777"
|
||||||
dirPermissionsUser: 0
|
dirPermissionsUser: 0
|
||||||
dirPermissionsGroup: 0
|
dirPermissionsGroup: 0
|
||||||
|
snapshots:
|
||||||
|
# can create multiple snapshot classes each with a parameters.driver value which
|
||||||
|
# overrides the default, a single install can use all 3 simultaneously if desired
|
||||||
|
#
|
||||||
|
# available options:
|
||||||
|
# - filecopy = rsync/cp
|
||||||
|
# - restic
|
||||||
|
# - kopia
|
||||||
|
#
|
||||||
|
default_driver: filecopy
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# of local-hostpath the node name will be appended
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
restic:
|
||||||
|
global_flags: []
|
||||||
|
# - --insecure-tls
|
||||||
|
|
||||||
|
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
|
||||||
|
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
|
||||||
|
# host will be set to csi driver name
|
||||||
|
tags: []
|
||||||
|
# - foobar
|
||||||
|
# - baz=bar
|
||||||
|
|
||||||
|
# automatically prune when a snapshot is deleted
|
||||||
|
prune: true
|
||||||
|
|
||||||
|
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
|
||||||
|
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
|
||||||
|
env: {}
|
||||||
|
# RESTIC_PASSWORD
|
||||||
|
# RESTIC_REPOSITORY
|
||||||
|
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
|
||||||
|
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
|
||||||
|
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
|
||||||
|
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# of local-hostpath the node name will be appended
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
kopia:
|
||||||
|
# kopia repository status -t -s
|
||||||
|
config_token:
|
||||||
|
global_flags: []
|
||||||
|
# <key>:<value>
|
||||||
|
tags: []
|
||||||
|
# - "foobar:true"
|
||||||
|
env: {}
|
||||||
|
|
|
||||||
|
|
@ -9,3 +9,50 @@ lustre:
|
||||||
dirPermissionsMode: "0777"
|
dirPermissionsMode: "0777"
|
||||||
dirPermissionsUser: root
|
dirPermissionsUser: root
|
||||||
dirPermissionsGroup: wheel
|
dirPermissionsGroup: wheel
|
||||||
|
snapshots:
|
||||||
|
# can create multiple snapshot classes each with a parameters.driver value which
|
||||||
|
# overrides the default, a single install can use all 3 simultaneously if desired
|
||||||
|
#
|
||||||
|
# available options:
|
||||||
|
# - filecopy = rsync/cp
|
||||||
|
# - restic
|
||||||
|
# - kopia
|
||||||
|
#
|
||||||
|
default_driver: filecopy
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
restic:
|
||||||
|
global_flags: []
|
||||||
|
# - --insecure-tls
|
||||||
|
|
||||||
|
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
|
||||||
|
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
|
||||||
|
# host will be set to csi driver name
|
||||||
|
tags: []
|
||||||
|
# - foobar
|
||||||
|
# - baz=bar
|
||||||
|
|
||||||
|
# automatically prune when a snapshot is deleted
|
||||||
|
prune: true
|
||||||
|
|
||||||
|
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
|
||||||
|
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
|
||||||
|
env: {}
|
||||||
|
# RESTIC_PASSWORD
|
||||||
|
# RESTIC_REPOSITORY
|
||||||
|
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
|
||||||
|
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
|
||||||
|
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
|
||||||
|
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
|
||||||
|
|
||||||
|
# backup hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
kopia:
|
||||||
|
# kopia repository status -t -s
|
||||||
|
config_token:
|
||||||
|
global_flags: []
|
||||||
|
# <key>:<value>
|
||||||
|
tags: []
|
||||||
|
# - "foobar:true"
|
||||||
|
env: {}
|
||||||
|
|
|
||||||
|
|
@ -8,3 +8,50 @@ nfs:
|
||||||
dirPermissionsMode: "0777"
|
dirPermissionsMode: "0777"
|
||||||
dirPermissionsUser: root
|
dirPermissionsUser: root
|
||||||
dirPermissionsGroup: wheel
|
dirPermissionsGroup: wheel
|
||||||
|
snapshots:
|
||||||
|
# can create multiple snapshot classes each with a parameters.driver value which
|
||||||
|
# overrides the default, a single install can use all 3 simultaneously if desired
|
||||||
|
#
|
||||||
|
# available options:
|
||||||
|
# - filecopy = rsync/cp
|
||||||
|
# - restic
|
||||||
|
# - kopia
|
||||||
|
#
|
||||||
|
default_driver: filecopy
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
restic:
|
||||||
|
global_flags: []
|
||||||
|
# - --insecure-tls
|
||||||
|
|
||||||
|
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
|
||||||
|
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
|
||||||
|
# host will be set to csi driver name
|
||||||
|
tags: []
|
||||||
|
# - foobar
|
||||||
|
# - baz=bar
|
||||||
|
|
||||||
|
# automatically prune when a snapshot is deleted
|
||||||
|
prune: true
|
||||||
|
|
||||||
|
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
|
||||||
|
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
|
||||||
|
env: {}
|
||||||
|
# RESTIC_PASSWORD
|
||||||
|
# RESTIC_REPOSITORY
|
||||||
|
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
|
||||||
|
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
|
||||||
|
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
|
||||||
|
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
kopia:
|
||||||
|
# kopia repository status -t -s
|
||||||
|
config_token:
|
||||||
|
global_flags: []
|
||||||
|
# <key>:<value>
|
||||||
|
tags: []
|
||||||
|
# - "foobar:true"
|
||||||
|
env: {}
|
||||||
|
|
|
||||||
|
|
@ -9,21 +9,6 @@ spec:
|
||||||
accessModes:
|
accessModes:
|
||||||
- ReadWriteOnce
|
- ReadWriteOnce
|
||||||
persistentVolumeReclaimPolicy: Retain
|
persistentVolumeReclaimPolicy: Retain
|
||||||
# can be used to handle CHAP
|
|
||||||
# in the secret create the following keys:
|
|
||||||
#
|
|
||||||
# # any arbitrary iscsiadm entries can be add by creating keys starting with node-db.<entry.name>
|
|
||||||
# # if doing CHAP
|
|
||||||
# node-db.node.session.auth.authmethod: CHAP
|
|
||||||
# node-db.node.session.auth.username: foo
|
|
||||||
# node-db.node.session.auth.password: bar
|
|
||||||
#
|
|
||||||
# # if doing mutual CHAP
|
|
||||||
# node-db.node.session.auth.username_in: baz
|
|
||||||
# node-db.node.session.auth.password_in: bar
|
|
||||||
#nodeStageSecretRef:
|
|
||||||
# name: some name
|
|
||||||
# namespace: some namespace
|
|
||||||
mountOptions: []
|
mountOptions: []
|
||||||
csi:
|
csi:
|
||||||
driver: org.democratic-csi.node-manual
|
driver: org.democratic-csi.node-manual
|
||||||
|
|
@ -31,6 +16,21 @@ spec:
|
||||||
# can be ext4 or xfs
|
# can be ext4 or xfs
|
||||||
fsType: ext4
|
fsType: ext4
|
||||||
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
||||||
|
# can be used to handle CHAP
|
||||||
|
# in the secret create the following keys:
|
||||||
|
#
|
||||||
|
# # any arbitrary iscsiadm entries can be add by creating keys starting with node-db.<entry.name>
|
||||||
|
# # if doing CHAP
|
||||||
|
# node-db.node.session.auth.authmethod: CHAP
|
||||||
|
# node-db.node.session.auth.username: foo
|
||||||
|
# node-db.node.session.auth.password: bar
|
||||||
|
#
|
||||||
|
# # if doing mutual CHAP
|
||||||
|
# node-db.node.session.auth.username_in: baz
|
||||||
|
# node-db.node.session.auth.password_in: bar
|
||||||
|
#nodeStageSecretRef:
|
||||||
|
# name: some name
|
||||||
|
# namespace: some namespace
|
||||||
volumeAttributes:
|
volumeAttributes:
|
||||||
portal: <ip:port>
|
portal: <ip:port>
|
||||||
#portals: <ip:port>,<ip:port>,...
|
#portals: <ip:port>,<ip:port>,...
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,51 @@
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Secret
|
||||||
|
metadata:
|
||||||
|
name: objectivefs-secret
|
||||||
|
namespace: kube-system
|
||||||
|
stringData:
|
||||||
|
# these can be defined here OR in volumeAttributes
|
||||||
|
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
|
||||||
|
"env.OBJECTSTORE": ""
|
||||||
|
"env.ACCESS_KEY": ""
|
||||||
|
"env.SECRET_KEY": ""
|
||||||
|
"env.OBJECTIVEFS_PASSPHRASE": ""
|
||||||
|
# does NOT need admin key appended for node-manual operations
|
||||||
|
"env.OBJECTIVEFS_LICENSE": ""
|
||||||
|
"env.ENDPOINT": ""
|
||||||
|
# ...
|
||||||
|
---
|
||||||
|
apiVersion: v1
|
||||||
|
kind: PersistentVolume
|
||||||
|
metadata:
|
||||||
|
name: objectivefs-manual
|
||||||
|
spec:
|
||||||
|
capacity:
|
||||||
|
storage: 1Gi
|
||||||
|
accessModes:
|
||||||
|
- ReadWriteMany
|
||||||
|
persistentVolumeReclaimPolicy: Retain
|
||||||
|
mountOptions:
|
||||||
|
[]
|
||||||
|
# https://objectivefs.com/userguide#mount
|
||||||
|
#- nodiratime
|
||||||
|
#- noatime
|
||||||
|
#- fsavail=<size>
|
||||||
|
csi:
|
||||||
|
driver: org.democratic-csi.node-manual
|
||||||
|
readOnly: false
|
||||||
|
fsType: objectivefs
|
||||||
|
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
||||||
|
nodeStageSecretRef:
|
||||||
|
name: objectivefs-secret
|
||||||
|
namespace: kube-system
|
||||||
|
volumeAttributes:
|
||||||
|
node_attach_driver: objectivefs
|
||||||
|
provisioner_driver: node-manual
|
||||||
|
filesystem: "ofs/test"
|
||||||
|
# these can be defined here OR in the secret referenced above
|
||||||
|
# secrets are processed *before* volumeAttributes and therefore volumeAttributes will take precedence
|
||||||
|
#"env.OBJECTSTORE": "minio://"
|
||||||
|
#"env.ACCESS_KEY": ""
|
||||||
|
# ...
|
||||||
|
|
@ -9,9 +9,6 @@ spec:
|
||||||
accessModes:
|
accessModes:
|
||||||
- ReadWriteMany
|
- ReadWriteMany
|
||||||
persistentVolumeReclaimPolicy: Retain
|
persistentVolumeReclaimPolicy: Retain
|
||||||
#nodeStageSecretRef:
|
|
||||||
# name: some name
|
|
||||||
# namespace: some namespace
|
|
||||||
mountOptions:
|
mountOptions:
|
||||||
# creds can be entered into the node-stage-secret in the `mount_flags` key
|
# creds can be entered into the node-stage-secret in the `mount_flags` key
|
||||||
# the value should be: username=foo,password=bar
|
# the value should be: username=foo,password=bar
|
||||||
|
|
@ -22,6 +19,9 @@ spec:
|
||||||
readOnly: false
|
readOnly: false
|
||||||
fsType: cifs
|
fsType: cifs
|
||||||
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
volumeHandle: unique-volumeid # make sure it's a unique id in the cluster
|
||||||
|
#nodeStageSecretRef:
|
||||||
|
# name: some name
|
||||||
|
# namespace: some namespace
|
||||||
volumeAttributes:
|
volumeAttributes:
|
||||||
server: host or ip
|
server: host or ip
|
||||||
share: someshare
|
share: someshare
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,32 @@
|
||||||
|
driver: objectivefs
|
||||||
|
objectivefs:
|
||||||
|
# note, ALL provisioned filesystems will be created in this pool / bucket
|
||||||
|
# with the same passphrase entered below
|
||||||
|
#
|
||||||
|
# in general this pool should be considered as fully managed by democratic-csi
|
||||||
|
# so a dedicated pool per-cluster / deployment would be best practice
|
||||||
|
#
|
||||||
|
pool: ofscsi
|
||||||
|
cli:
|
||||||
|
sudoEnabled: false
|
||||||
|
env:
|
||||||
|
# NOTE: this must be the license key + admin key
|
||||||
|
# admin key feature must be activated on your account
|
||||||
|
# https://objectivefs.com/howto/objectivefs-admin-key-setup
|
||||||
|
OBJECTIVEFS_LICENSE:
|
||||||
|
OBJECTSTORE:
|
||||||
|
ENDPOINT:
|
||||||
|
SECRET_KEY:
|
||||||
|
ACCESS_KEY:
|
||||||
|
# do NOT change this once it has been set and deployed
|
||||||
|
OBJECTIVEFS_PASSPHRASE:
|
||||||
|
# ...
|
||||||
|
|
||||||
|
_private:
|
||||||
|
csi:
|
||||||
|
volume:
|
||||||
|
idHash:
|
||||||
|
# due to 63 char limit on objectivefs fs name, we should
|
||||||
|
# hash volume names to prevent fs names which are too long
|
||||||
|
# can be 1 of md5, crc8, crc16, crc32
|
||||||
|
strategy: crc32
|
||||||
|
|
@ -8,10 +8,31 @@
|
||||||
_private:
|
_private:
|
||||||
csi:
|
csi:
|
||||||
volume:
|
volume:
|
||||||
derivedContext:
|
volumeContext:
|
||||||
# driver left blank is used to auto select
|
# driver left blank is used to auto select
|
||||||
driver: memory # strictly to facilitate testing
|
driver: memory # strictly to facilitate testing
|
||||||
#driver: kubernetes
|
#driver: kubernetes
|
||||||
|
|
||||||
|
# THIS IS UNSUPPORTED, BAD THINGS WILL HAPPEN IF NOT CONFIGURED PROPERLY
|
||||||
|
# https://github.com/democratic-csi/democratic-csi/issues/289
|
||||||
|
#
|
||||||
|
# note the volume length must *always* be the same for every call for the same volume by the CO
|
||||||
|
# the length must NOT execeed 128 characters
|
||||||
|
# must start with an alphanumeric character
|
||||||
|
# must only contain alphnumeric characters or `-` or `_`
|
||||||
|
idTemplate: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
|
||||||
|
|
||||||
|
# THIS IS UNSUPPORTED, BAD THINGS WILL HAPPEN IF NOT CONFIGURED PROPERLY
|
||||||
|
# https://github.com/democratic-csi/democratic-csi/issues/289
|
||||||
|
#
|
||||||
|
# in order for this to behave sanely you *MUST* set consistent templates for
|
||||||
|
# share names/assets (ie: nfs/iscsi/etc) and the `idTemplate` above
|
||||||
|
#
|
||||||
|
# setting to retain results in noop delete opertions (both shares where applicable and volumes remain intact)
|
||||||
|
# delete|retain
|
||||||
|
deleteStrategy: retain
|
||||||
|
|
||||||
|
# if set, this hash is applied *after* the templating above
|
||||||
idHash:
|
idHash:
|
||||||
strategy: crc16
|
strategy: crc16
|
||||||
#strategy: crc32
|
#strategy: crc32
|
||||||
|
|
|
||||||
|
|
@ -8,3 +8,50 @@ smb:
|
||||||
dirPermissionsMode: "0777"
|
dirPermissionsMode: "0777"
|
||||||
dirPermissionsUser: root
|
dirPermissionsUser: root
|
||||||
dirPermissionsGroup: wheel
|
dirPermissionsGroup: wheel
|
||||||
|
snapshots:
|
||||||
|
# can create multiple snapshot classes each with a parameters.driver value which
|
||||||
|
# overrides the default, a single install can use all 3 simultaneously if desired
|
||||||
|
#
|
||||||
|
# available options:
|
||||||
|
# - filecopy = rsync/cp
|
||||||
|
# - restic
|
||||||
|
# - kopia
|
||||||
|
#
|
||||||
|
default_driver: filecopy
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
restic:
|
||||||
|
global_flags: []
|
||||||
|
# - --insecure-tls
|
||||||
|
|
||||||
|
# these are added to snapshots, but are NOT used for querying/selectors by democratic-csi
|
||||||
|
# it is *HIGHLY* recommended to set the instance_id parameter when using restic, it should be a universally unique ID for every deployment
|
||||||
|
# host will be set to csi driver name
|
||||||
|
tags: []
|
||||||
|
# - foobar
|
||||||
|
# - baz=bar
|
||||||
|
|
||||||
|
# automatically prune when a snapshot is deleted
|
||||||
|
prune: true
|
||||||
|
|
||||||
|
# at a minimum RESTIC_PASSWORD and RESTIC_REPOSITORY must be set, additionally
|
||||||
|
# any relevant env vars for connecting to RESTIC_REPOSITORY should be set
|
||||||
|
env: {}
|
||||||
|
# RESTIC_PASSWORD
|
||||||
|
# RESTIC_REPOSITORY
|
||||||
|
# AWS_ACCESS_KEY_ID=<MY_ACCESS_KEY>
|
||||||
|
# AWS_SECRET_ACCESS_KEY=<MY_SECRET_ACCESS_KEY>
|
||||||
|
# B2_ACCOUNT_ID=<MY_APPLICATION_KEY_ID>
|
||||||
|
# B2_ACCOUNT_KEY=<MY_APPLICATION_KEY>
|
||||||
|
|
||||||
|
# snapshot hostname will be set to the csiDriver.name value, in the case
|
||||||
|
# it is assumed that the repo has been created beforehand
|
||||||
|
kopia:
|
||||||
|
# kopia repository status -t -s
|
||||||
|
config_token:
|
||||||
|
global_flags: []
|
||||||
|
# <key>:<value>
|
||||||
|
tags: []
|
||||||
|
# - "foobar:true"
|
||||||
|
env: {}
|
||||||
|
|
|
||||||
|
|
@ -31,6 +31,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/test
|
datasetParentName: tank/k8s/test
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
|
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
|
||||||
|
|
||||||
# "" (inherit), lz4, gzip-9, etc
|
# "" (inherit), lz4, gzip-9, etc
|
||||||
|
|
@ -70,6 +71,10 @@ iscsi:
|
||||||
# mutual CHAP
|
# mutual CHAP
|
||||||
#mutual_userid: "baz"
|
#mutual_userid: "baz"
|
||||||
#mutual_password: "bar"
|
#mutual_password: "bar"
|
||||||
|
block:
|
||||||
|
attributes:
|
||||||
|
# set to 1 to enable Thin Provisioning Unmap
|
||||||
|
emulate_tpu: 0
|
||||||
targetPortal: "server[:port]"
|
targetPortal: "server[:port]"
|
||||||
# for multipath
|
# for multipath
|
||||||
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
|
targetPortals: [] # [ "server[:port]", "server[:port]", ... ]
|
||||||
|
|
|
||||||
|
|
@ -31,6 +31,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/test
|
datasetParentName: tank/k8s/test
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
|
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
|
||||||
|
|
||||||
datasetEnableQuotas: true
|
datasetEnableQuotas: true
|
||||||
|
|
|
||||||
|
|
@ -65,6 +65,7 @@ nvmeof:
|
||||||
# http://git.infradead.org/users/hch/nvmetcli.git
|
# http://git.infradead.org/users/hch/nvmetcli.git
|
||||||
shareStrategyNvmetCli:
|
shareStrategyNvmetCli:
|
||||||
#sudoEnabled: true
|
#sudoEnabled: true
|
||||||
|
# /root/.local/bin/nvmetcli
|
||||||
#nvmetcliPath: nvmetcli
|
#nvmetcliPath: nvmetcli
|
||||||
# prevent startup race conditions by ensuring the config on disk has been imported
|
# prevent startup race conditions by ensuring the config on disk has been imported
|
||||||
# before we start messing with things
|
# before we start messing with things
|
||||||
|
|
@ -73,7 +74,7 @@ nvmeof:
|
||||||
basename: "nqn.2003-01.org.linux-nvme"
|
basename: "nqn.2003-01.org.linux-nvme"
|
||||||
# add more ports here as appropriate if you have multipath
|
# add more ports here as appropriate if you have multipath
|
||||||
ports:
|
ports:
|
||||||
- "1"
|
- "1"
|
||||||
subsystem:
|
subsystem:
|
||||||
attributes:
|
attributes:
|
||||||
allow_any_host: 1
|
allow_any_host: 1
|
||||||
|
|
@ -96,7 +97,7 @@ nvmeof:
|
||||||
attributes:
|
attributes:
|
||||||
allow_any_host: "true"
|
allow_any_host: "true"
|
||||||
listeners:
|
listeners:
|
||||||
- trtype: tcp
|
- trtype: tcp
|
||||||
traddr: server
|
traddr: server
|
||||||
trsvcid: port
|
trsvcid: port
|
||||||
adrfam: ipv4
|
adrfam: ipv4
|
||||||
|
|
|
||||||
|
|
@ -32,6 +32,7 @@ zfs:
|
||||||
datasetParentName: tank/k8s/test
|
datasetParentName: tank/k8s/test
|
||||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||||
# they may be siblings, but neither should be nested in the other
|
# they may be siblings, but neither should be nested in the other
|
||||||
|
# do NOT comment this option out even if you don't plan to use snapshots, just leave it with dummy value
|
||||||
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
|
detachedSnapshotsDatasetParentName: tanks/k8s/test-snapshots
|
||||||
|
|
||||||
datasetEnableQuotas: true
|
datasetEnableQuotas: true
|
||||||
|
|
|
||||||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,6 +1,6 @@
|
||||||
{
|
{
|
||||||
"name": "democratic-csi",
|
"name": "democratic-csi",
|
||||||
"version": "1.8.2",
|
"version": "1.9.0",
|
||||||
"description": "kubernetes csi driver framework",
|
"description": "kubernetes csi driver framework",
|
||||||
"main": "bin/democratic-csi",
|
"main": "bin/democratic-csi",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
|
|
@ -24,6 +24,7 @@
|
||||||
"async-mutex": "^0.4.0",
|
"async-mutex": "^0.4.0",
|
||||||
"axios": "^1.1.3",
|
"axios": "^1.1.3",
|
||||||
"bunyan": "^1.8.15",
|
"bunyan": "^1.8.15",
|
||||||
|
"crc": "^4.3.2",
|
||||||
"fs-extra": "^11.1.0",
|
"fs-extra": "^11.1.0",
|
||||||
"handlebars": "^4.7.7",
|
"handlebars": "^4.7.7",
|
||||||
"js-yaml": "^4.0.0",
|
"js-yaml": "^4.0.0",
|
||||||
|
|
|
||||||
|
|
@ -4,9 +4,19 @@ const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
const cp = require("child_process");
|
const cp = require("child_process");
|
||||||
const fs = require("fs");
|
const fs = require("fs");
|
||||||
const fse = require("fs-extra");
|
const fse = require("fs-extra");
|
||||||
|
const Kopia = require("../../utils/kopia").Kopia;
|
||||||
|
const os = require("os");
|
||||||
const path = require("path");
|
const path = require("path");
|
||||||
|
const Restic = require("../../utils/restic").Restic;
|
||||||
const semver = require("semver");
|
const semver = require("semver");
|
||||||
|
|
||||||
|
const __REGISTRY_NS__ = "ControllerClientCommonDriver";
|
||||||
|
|
||||||
|
// https://forum.restic.net/t/how-to-prevent-two-restic-tasks-concurrently/6859/5
|
||||||
|
const SNAPSHOTS_CUT_IN_FLIGHT = new Set();
|
||||||
|
const SNAPSHOTS_RESTORE_IN_FLIGHT = new Set();
|
||||||
|
const DEFAULT_SNAPSHOT_DRIVER = "filecopy";
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Crude nfs-client driver which simply creates directories to be mounted
|
* Crude nfs-client driver which simply creates directories to be mounted
|
||||||
* and uses rsync for cloning/snapshots
|
* and uses rsync for cloning/snapshots
|
||||||
|
|
@ -102,6 +112,21 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
|
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (this.ctx.args.csiMode.includes("controller")) {
|
||||||
|
setInterval(() => {
|
||||||
|
this.ctx.logger.info("snapshots cut in flight", {
|
||||||
|
names: [...SNAPSHOTS_CUT_IN_FLIGHT],
|
||||||
|
count: SNAPSHOTS_CUT_IN_FLIGHT.size,
|
||||||
|
});
|
||||||
|
}, 30 * 1000);
|
||||||
|
setInterval(() => {
|
||||||
|
this.ctx.logger.info("snapshots restore in flight", {
|
||||||
|
names: [...SNAPSHOTS_RESTORE_IN_FLIGHT],
|
||||||
|
count: SNAPSHOTS_RESTORE_IN_FLIGHT.size,
|
||||||
|
});
|
||||||
|
}, 30 * 1000);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
getAccessModes(capability) {
|
getAccessModes(capability) {
|
||||||
|
|
@ -429,6 +454,90 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
return p.replaceAll(path.posix.sep, path.win32.sep);
|
return p.replaceAll(path.posix.sep, path.win32.sep);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async getResticClient() {
|
||||||
|
const driver = this;
|
||||||
|
|
||||||
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:restic`, () => {
|
||||||
|
const config_key = driver.getConfigKey();
|
||||||
|
|
||||||
|
const restic_env = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.restic.env",
|
||||||
|
{}
|
||||||
|
);
|
||||||
|
|
||||||
|
const restic_global_flags = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.restic.global_flags",
|
||||||
|
[]
|
||||||
|
);
|
||||||
|
const client = new Restic({
|
||||||
|
env: restic_env,
|
||||||
|
logger: driver.ctx.logger,
|
||||||
|
global_flags: restic_global_flags,
|
||||||
|
});
|
||||||
|
|
||||||
|
let hostname = driver.ctx.args.csiName;
|
||||||
|
if (driver.options.driver == "local-hostpath") {
|
||||||
|
let nodename = process.env.CSI_NODE_ID || os.hostname();
|
||||||
|
hostname = `${hostname}-${nodename}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
return client;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async getKopiaClient() {
|
||||||
|
const driver = this;
|
||||||
|
|
||||||
|
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:kopia`, async () => {
|
||||||
|
const config_key = driver.getConfigKey();
|
||||||
|
|
||||||
|
const kopia_env = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.kopia.env",
|
||||||
|
{}
|
||||||
|
);
|
||||||
|
|
||||||
|
const kopia_global_flags = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.kopia.global_flags",
|
||||||
|
[]
|
||||||
|
);
|
||||||
|
const client = new Kopia({
|
||||||
|
env: kopia_env,
|
||||||
|
logger: driver.ctx.logger,
|
||||||
|
global_flags: kopia_global_flags,
|
||||||
|
});
|
||||||
|
|
||||||
|
let hostname = driver.ctx.args.csiName;
|
||||||
|
if (driver.options.driver == "local-hostpath") {
|
||||||
|
let nodename = process.env.CSI_NODE_ID || os.hostname();
|
||||||
|
hostname = `${hostname}-${nodename}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
let username = "democratic-csi";
|
||||||
|
|
||||||
|
await client.repositoryConnect([
|
||||||
|
"--override-hostname",
|
||||||
|
hostname,
|
||||||
|
"--override-username",
|
||||||
|
username,
|
||||||
|
"from-config",
|
||||||
|
"--token",
|
||||||
|
_.get(driver.options[config_key], "snapshots.kopia.config_token", ""),
|
||||||
|
]);
|
||||||
|
|
||||||
|
//let repositoryStatus = await client.repositoryStatus();
|
||||||
|
//console.log(repositoryStatus);
|
||||||
|
|
||||||
|
client.hostname = hostname;
|
||||||
|
client.username = username;
|
||||||
|
|
||||||
|
return client;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a volume doing in essence the following:
|
* Create a volume doing in essence the following:
|
||||||
* 1. create directory
|
* 1. create directory
|
||||||
|
|
@ -442,16 +551,10 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
async CreateVolume(call) {
|
async CreateVolume(call) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
|
|
||||||
let config_key = this.getConfigKey();
|
const config_key = driver.getConfigKey();
|
||||||
let name = call.request.name;
|
const volume_id = await driver.getVolumeIdFromCall(call);
|
||||||
let volume_content_source = call.request.volume_content_source;
|
const volume_content_source = call.request.volume_content_source;
|
||||||
|
const instance_id = driver.options.instance_id;
|
||||||
if (!name) {
|
|
||||||
throw new GrpcError(
|
|
||||||
grpc.status.INVALID_ARGUMENT,
|
|
||||||
`volume name is required`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
call.request.volume_capabilities &&
|
call.request.volume_capabilities &&
|
||||||
|
|
@ -513,7 +616,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
const volume_path = driver.getControllerVolumePath(name);
|
const volume_path = driver.getControllerVolumePath(volume_id);
|
||||||
|
|
||||||
let response;
|
let response;
|
||||||
let source_path;
|
let source_path;
|
||||||
|
|
@ -525,13 +628,117 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
// create dataset
|
// create dataset
|
||||||
if (volume_content_source) {
|
if (volume_content_source) {
|
||||||
|
let snapshot_driver;
|
||||||
|
let snapshot_id;
|
||||||
|
|
||||||
|
if (volume_content_source.type == "snapshot") {
|
||||||
|
snapshot_id = volume_content_source.snapshot.snapshot_id;
|
||||||
|
|
||||||
|
// get parsed variant of driver to allow snapshotter to work with all
|
||||||
|
// drivers simultaneously
|
||||||
|
const parsed_snapshot_id = new URLSearchParams(snapshot_id);
|
||||||
|
if (parsed_snapshot_id.get("snapshot_driver")) {
|
||||||
|
snapshot_id = parsed_snapshot_id.get("snapshot_id");
|
||||||
|
snapshot_driver = parsed_snapshot_id.get("snapshot_driver");
|
||||||
|
} else {
|
||||||
|
snapshot_driver = "filecopy";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
switch (volume_content_source.type) {
|
switch (volume_content_source.type) {
|
||||||
// must be available when adverstising CREATE_DELETE_SNAPSHOT
|
// must be available when adverstising CREATE_DELETE_SNAPSHOT
|
||||||
// simply clone
|
// simply clone
|
||||||
case "snapshot":
|
case "snapshot":
|
||||||
source_path = driver.getControllerSnapshotPath(
|
switch (snapshot_driver) {
|
||||||
volume_content_source.snapshot.snapshot_id
|
case "filecopy":
|
||||||
);
|
{
|
||||||
|
source_path = driver.getControllerSnapshotPath(snapshot_id);
|
||||||
|
|
||||||
|
if (!(await driver.directoryExists(source_path))) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.NOT_FOUND,
|
||||||
|
`invalid volume_content_source path: ${source_path}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
driver.ctx.logger.debug(
|
||||||
|
"controller volume source path: %s",
|
||||||
|
source_path
|
||||||
|
);
|
||||||
|
await driver.cloneDir(source_path, volume_path);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case "restic":
|
||||||
|
{
|
||||||
|
const restic = await driver.getResticClient();
|
||||||
|
|
||||||
|
let options = [];
|
||||||
|
await restic.init();
|
||||||
|
|
||||||
|
// find snapshot
|
||||||
|
options = [snapshot_id];
|
||||||
|
const snapshots = await restic.snapshots(options);
|
||||||
|
if (!snapshots.length > 0) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.NOT_FOUND,
|
||||||
|
`invalid restic snapshot volume_content_source: ${snapshot_id}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
const snapshot = snapshots[snapshots.length - 1];
|
||||||
|
|
||||||
|
// restore snapshot
|
||||||
|
// --verify?
|
||||||
|
options = [
|
||||||
|
`${snapshot.id}:${snapshot.paths[0]}`,
|
||||||
|
"--target",
|
||||||
|
volume_path,
|
||||||
|
"--sparse",
|
||||||
|
"--host",
|
||||||
|
restic.hostname,
|
||||||
|
];
|
||||||
|
|
||||||
|
// technically same snapshot could be getting restored to multiple volumes simultaneously
|
||||||
|
// ensure we add target path as part of the key
|
||||||
|
SNAPSHOTS_RESTORE_IN_FLIGHT.add(
|
||||||
|
`${snapshot_id}:${volume_path}`
|
||||||
|
);
|
||||||
|
await restic.restore(options).finally(() => {
|
||||||
|
SNAPSHOTS_RESTORE_IN_FLIGHT.delete(
|
||||||
|
`${snapshot_id}:${volume_path}`
|
||||||
|
);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case "kopia":
|
||||||
|
{
|
||||||
|
const kopia = await driver.getKopiaClient();
|
||||||
|
const snapshot = await kopia.snapshotGet(snapshot_id);
|
||||||
|
|
||||||
|
if (!snapshot) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.NOT_FOUND,
|
||||||
|
`invalid restic snapshot volume_content_source: ${snapshot_id}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* --[no-]write-files-atomically
|
||||||
|
* --[no-]write-sparse-files
|
||||||
|
*/
|
||||||
|
let options = [
|
||||||
|
"--write-sparse-files",
|
||||||
|
snapshot_id,
|
||||||
|
volume_path,
|
||||||
|
];
|
||||||
|
await kopia.snapshotRestore(options);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`unknown snapthot driver: ${snapshot_driver}`
|
||||||
|
);
|
||||||
|
}
|
||||||
break;
|
break;
|
||||||
// must be available when adverstising CLONE_VOLUME
|
// must be available when adverstising CLONE_VOLUME
|
||||||
// create snapshot first, then clone
|
// create snapshot first, then clone
|
||||||
|
|
@ -539,24 +746,26 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
source_path = driver.getControllerVolumePath(
|
source_path = driver.getControllerVolumePath(
|
||||||
volume_content_source.volume.volume_id
|
volume_content_source.volume.volume_id
|
||||||
);
|
);
|
||||||
|
|
||||||
|
if (!(await driver.directoryExists(source_path))) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.NOT_FOUND,
|
||||||
|
`invalid volume_content_source path: ${source_path}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
driver.ctx.logger.debug(
|
||||||
|
"controller volume source path: %s",
|
||||||
|
source_path
|
||||||
|
);
|
||||||
|
await driver.cloneDir(source_path, volume_path);
|
||||||
break;
|
break;
|
||||||
default:
|
default:
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
grpc.status.INVALID_ARGUMENT,
|
grpc.status.INVALID_ARGUMENT,
|
||||||
`invalid volume_content_source type: ${volume_content_source.type}`
|
`invalid volume_content_source type: ${volume_content_source.type}`
|
||||||
);
|
);
|
||||||
break;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!(await driver.directoryExists(source_path))) {
|
|
||||||
throw new GrpcError(
|
|
||||||
grpc.status.NOT_FOUND,
|
|
||||||
`invalid volume_content_source path: ${source_path}`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
driver.ctx.logger.debug("controller source path: %s", source_path);
|
|
||||||
await driver.cloneDir(source_path, volume_path);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// set mode
|
// set mode
|
||||||
|
|
@ -596,7 +805,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
let volume_context = driver.getVolumeContext(name);
|
let volume_context = driver.getVolumeContext(volume_id);
|
||||||
|
|
||||||
volume_context["provisioner_driver"] = driver.options.driver;
|
volume_context["provisioner_driver"] = driver.options.driver;
|
||||||
if (driver.options.instance_id) {
|
if (driver.options.instance_id) {
|
||||||
|
|
@ -611,7 +820,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
const res = {
|
const res = {
|
||||||
volume: {
|
volume: {
|
||||||
volume_id: name,
|
volume_id,
|
||||||
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
|
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
|
||||||
capacity_bytes: 0,
|
capacity_bytes: 0,
|
||||||
content_source: volume_content_source,
|
content_source: volume_content_source,
|
||||||
|
|
@ -634,16 +843,27 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
async DeleteVolume(call) {
|
async DeleteVolume(call) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
|
|
||||||
let name = call.request.volume_id;
|
const volume_id = call.request.volume_id;
|
||||||
|
|
||||||
if (!name) {
|
if (!volume_id) {
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
grpc.status.INVALID_ARGUMENT,
|
grpc.status.INVALID_ARGUMENT,
|
||||||
`volume_id is required`
|
`volume_id is required`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
const volume_path = driver.getControllerVolumePath(name);
|
// deleteStrategy
|
||||||
|
const delete_strategy = _.get(
|
||||||
|
driver.options,
|
||||||
|
"_private.csi.volume.deleteStrategy",
|
||||||
|
""
|
||||||
|
);
|
||||||
|
|
||||||
|
if (delete_strategy == "retain") {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
const volume_path = driver.getControllerVolumePath(volume_id);
|
||||||
await driver.deleteDir(volume_path);
|
await driver.deleteDir(volume_path);
|
||||||
|
|
||||||
return {};
|
return {};
|
||||||
|
|
@ -724,14 +944,49 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
* Create snapshot is meant to be a syncronous call to 'cut' the snapshot
|
||||||
|
* in the case of rsync/restic/kopia/etc tooling a 'cut' can take a very
|
||||||
|
* long time. It was deemed appropriate to continue to wait vs making the
|
||||||
|
* call async with `ready_to_use` false.
|
||||||
|
*
|
||||||
|
* Restic:
|
||||||
|
* With restic the idea is to keep the tree scoped to each volume. Each
|
||||||
|
* new snapshot for the same volume should have a parent of the most recently
|
||||||
|
* cut snapshot for the same volume. Behind the scenes restic is applying
|
||||||
|
* dedup logic globally in the repo so efficiency should still be extremely
|
||||||
|
* efficient.
|
||||||
|
*
|
||||||
|
* Kopia:
|
||||||
|
*
|
||||||
|
*
|
||||||
|
* https://github.com/container-storage-interface/spec/blob/master/spec.md#createsnapshot
|
||||||
*
|
*
|
||||||
* @param {*} call
|
* @param {*} call
|
||||||
*/
|
*/
|
||||||
async CreateSnapshot(call) {
|
async CreateSnapshot(call) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
|
|
||||||
|
const config_key = driver.getConfigKey();
|
||||||
|
let snapshot_driver = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.default_driver",
|
||||||
|
DEFAULT_SNAPSHOT_DRIVER
|
||||||
|
);
|
||||||
|
|
||||||
|
// randomize driver for testing
|
||||||
|
//if (process.env.CSI_SANITY == "1") {
|
||||||
|
// call.request.parameters.driver = ["filecopy", "restic", "kopia"].random();
|
||||||
|
//}
|
||||||
|
|
||||||
|
if (call.request.parameters.driver) {
|
||||||
|
snapshot_driver = call.request.parameters.driver;
|
||||||
|
}
|
||||||
|
|
||||||
|
const instance_id = driver.options.instance_id;
|
||||||
|
let response;
|
||||||
|
|
||||||
// both these are required
|
// both these are required
|
||||||
let source_volume_id = call.request.source_volume_id;
|
const source_volume_id = call.request.source_volume_id;
|
||||||
let name = call.request.name;
|
let name = call.request.name;
|
||||||
|
|
||||||
if (!source_volume_id) {
|
if (!source_volume_id) {
|
||||||
|
|
@ -766,17 +1021,262 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
name = name.replace(/[^a-z0-9_\-:.+]+/gi, "");
|
name = name.replace(/[^a-z0-9_\-:.+]+/gi, "");
|
||||||
|
|
||||||
driver.ctx.logger.verbose("cleansed snapshot name: %s", name);
|
driver.ctx.logger.verbose("cleansed snapshot name: %s", name);
|
||||||
|
|
||||||
const snapshot_id = `${source_volume_id}-${name}`;
|
|
||||||
const volume_path = driver.getControllerVolumePath(source_volume_id);
|
const volume_path = driver.getControllerVolumePath(source_volume_id);
|
||||||
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
|
//const volume_path = "/home/thansen/beets/";
|
||||||
|
//const volume_path = "/var/lib/docker/";
|
||||||
|
|
||||||
// do NOT overwrite existing snapshot
|
let snapshot_id;
|
||||||
if (!(await driver.directoryExists(snapshot_path))) {
|
let size_bytes = 0;
|
||||||
await driver.cloneDir(volume_path, snapshot_path);
|
let ready_to_use = true;
|
||||||
|
let snapshot_date = new Date();
|
||||||
|
|
||||||
|
switch (snapshot_driver) {
|
||||||
|
case "filecopy":
|
||||||
|
{
|
||||||
|
snapshot_id = `${source_volume_id}-${name}`;
|
||||||
|
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
|
||||||
|
const snapshot_dir_exists = await driver.directoryExists(
|
||||||
|
snapshot_path
|
||||||
|
);
|
||||||
|
// do NOT overwrite existing snapshot
|
||||||
|
if (!snapshot_dir_exists) {
|
||||||
|
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
|
||||||
|
await driver.cloneDir(volume_path, snapshot_path).finally(() => {
|
||||||
|
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
|
||||||
|
});
|
||||||
|
driver.ctx.logger.info(
|
||||||
|
`filecopy backup finished: snapshot_id=${snapshot_id}, path=${volume_path}`
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
driver.ctx.logger.debug(
|
||||||
|
`filecopy backup already cut: ${snapshot_id}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
size_bytes = await driver.getDirectoryUsage(snapshot_path);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case "restic":
|
||||||
|
{
|
||||||
|
const restic = await driver.getResticClient();
|
||||||
|
const group_by_options = ["--group-by", "host,paths,tags"];
|
||||||
|
let snapshot_exists = false;
|
||||||
|
|
||||||
|
// --tag specified multiple times is OR logic, comma-separated is AND logic
|
||||||
|
let base_tag_option = `source=democratic-csi`;
|
||||||
|
base_tag_option += `,csi_volume_id=${source_volume_id}`;
|
||||||
|
if (instance_id) {
|
||||||
|
base_tag_option += `csi_instance_id=${instance_id}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
let options = [];
|
||||||
|
|
||||||
|
/**
|
||||||
|
* ensure repo has been initted
|
||||||
|
*
|
||||||
|
* it is expected that at a minimum the following env vars are set
|
||||||
|
* RESTIC_PASSWORD
|
||||||
|
* RESTIC_REPOSITORY
|
||||||
|
*/
|
||||||
|
options = [];
|
||||||
|
await restic.init();
|
||||||
|
|
||||||
|
// see if snapshot already exist with matching tags, etc
|
||||||
|
options = [
|
||||||
|
"--path",
|
||||||
|
volume_path.replace(/\/$/, ""),
|
||||||
|
"--host",
|
||||||
|
restic.hostname,
|
||||||
|
];
|
||||||
|
|
||||||
|
// when searching for existing snapshot include name
|
||||||
|
response = await restic.snapshots(
|
||||||
|
options
|
||||||
|
.concat(group_by_options)
|
||||||
|
.concat(["--tag", base_tag_option + `,csi_snapshot_name=${name}`])
|
||||||
|
);
|
||||||
|
|
||||||
|
if (response.length > 0) {
|
||||||
|
snapshot_exists = true;
|
||||||
|
const snapshot = response[response.length - 1];
|
||||||
|
driver.ctx.logger.debug(
|
||||||
|
`restic backup already cut: ${snapshot.id}`
|
||||||
|
);
|
||||||
|
const stats = await restic.stats([snapshot.id]);
|
||||||
|
|
||||||
|
snapshot_id = snapshot.id;
|
||||||
|
snapshot_date = new Date(snapshot.time);
|
||||||
|
size_bytes = stats.total_size;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!snapshot_exists) {
|
||||||
|
// --no-scan do not run scanner to estimate size of backup
|
||||||
|
// -x, --one-file-system exclude other file systems, don't cross filesystem boundaries and subvolumes
|
||||||
|
options = [
|
||||||
|
"--host",
|
||||||
|
restic.hostname,
|
||||||
|
"--one-file-system",
|
||||||
|
//"--no-scan",
|
||||||
|
];
|
||||||
|
|
||||||
|
// backup with minimal tags to ensure a sane parent for the volume (since tags are included in group_by)
|
||||||
|
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
|
||||||
|
response = await restic
|
||||||
|
.backup(
|
||||||
|
volume_path,
|
||||||
|
options
|
||||||
|
.concat(group_by_options)
|
||||||
|
.concat(["--tag", base_tag_option])
|
||||||
|
)
|
||||||
|
.finally(() => {
|
||||||
|
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
|
||||||
|
});
|
||||||
|
response.parsed.reverse();
|
||||||
|
let summary = response.parsed.find((message) => {
|
||||||
|
return message.message_type == "summary";
|
||||||
|
});
|
||||||
|
snapshot_id = summary.snapshot_id;
|
||||||
|
driver.ctx.logger.info(
|
||||||
|
`restic backup finished: snapshot_id=${snapshot_id}, path=${volume_path}, total_duration=${
|
||||||
|
summary.total_duration | 0
|
||||||
|
}s`
|
||||||
|
);
|
||||||
|
const stats = await restic.stats([snapshot_id]);
|
||||||
|
size_bytes = stats.total_size;
|
||||||
|
|
||||||
|
// only apply these tags at creation, do NOT use for search above etc
|
||||||
|
let add_tags = `csi_snapshot_name=${name}`;
|
||||||
|
let config_tags = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.restic.tags",
|
||||||
|
[]
|
||||||
|
);
|
||||||
|
|
||||||
|
if (config_tags.length > 0) {
|
||||||
|
add_tags += `,${config_tags.join(",")}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
await restic.tag([
|
||||||
|
"--path",
|
||||||
|
volume_path.replace(/\/$/, ""),
|
||||||
|
"--host",
|
||||||
|
restic.hostname,
|
||||||
|
"--add",
|
||||||
|
add_tags,
|
||||||
|
snapshot_id,
|
||||||
|
]);
|
||||||
|
|
||||||
|
// this is ugly, the tag operation should output the new id, so we
|
||||||
|
// must resort to full query of all snapshots for the volume
|
||||||
|
// find snapshot using `original` id as adding tags creates a new id
|
||||||
|
options = [
|
||||||
|
"--path",
|
||||||
|
volume_path.replace(/\/$/, ""),
|
||||||
|
"--host",
|
||||||
|
restic.hostname,
|
||||||
|
];
|
||||||
|
response = await restic.snapshots(
|
||||||
|
options
|
||||||
|
.concat(group_by_options)
|
||||||
|
.concat([
|
||||||
|
"--tag",
|
||||||
|
`${base_tag_option},csi_snapshot_name=${name}`,
|
||||||
|
])
|
||||||
|
);
|
||||||
|
let original_snapshot_id = snapshot_id;
|
||||||
|
let snapshot = response.find((snapshot) => {
|
||||||
|
return snapshot.original == original_snapshot_id;
|
||||||
|
});
|
||||||
|
if (!snapshot) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.UNKNOWN,
|
||||||
|
`failed to find snapshot post-tag operation: snapshot_id=${original_snapshot_id}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
snapshot_id = snapshot.id;
|
||||||
|
driver.ctx.logger.info(
|
||||||
|
`restic backup successfully applied additional tags: new_snapshot_id=${snapshot_id}, original_snapshot_id=${original_snapshot_id} path=${volume_path}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case "kopia":
|
||||||
|
{
|
||||||
|
const kopia = await driver.getKopiaClient();
|
||||||
|
let options = [];
|
||||||
|
|
||||||
|
let snapshot_exists = false;
|
||||||
|
|
||||||
|
// --tags specified multiple times means snapshot must contain ALL supplied tags
|
||||||
|
let tags = [];
|
||||||
|
tags.push(`source:democratic-csi`);
|
||||||
|
tags.push(`csi_volume_id:${source_volume_id}`);
|
||||||
|
if (instance_id) {
|
||||||
|
tags.push(`csi_instance_id:${instance_id}`);
|
||||||
|
}
|
||||||
|
tags.push(`csi_snapshot_name:${name}`);
|
||||||
|
|
||||||
|
options = ["--no-storage-stats", "--no-delta"];
|
||||||
|
tags.forEach((item) => {
|
||||||
|
options.push("--tags", item);
|
||||||
|
});
|
||||||
|
|
||||||
|
options.push(
|
||||||
|
`${kopia.username}@${kopia.hostname}:${volume_path.replace(
|
||||||
|
/\/$/,
|
||||||
|
""
|
||||||
|
)}`
|
||||||
|
);
|
||||||
|
|
||||||
|
response = await kopia.snapshotList(options);
|
||||||
|
|
||||||
|
if (response.length > 0) {
|
||||||
|
snapshot_exists = true;
|
||||||
|
const snapshot = response[response.length - 1];
|
||||||
|
driver.ctx.logger.debug(
|
||||||
|
`kopia snapshot already cut: ${snapshot.id}`
|
||||||
|
);
|
||||||
|
|
||||||
|
snapshot_id = snapshot.id;
|
||||||
|
snapshot_date = new Date(snapshot.startTime); // maybe use endTime?
|
||||||
|
size_bytes = snapshot.stats.totalSize;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!snapshot_exists) {
|
||||||
|
// create snapshot
|
||||||
|
options = [];
|
||||||
|
tags.forEach((item) => {
|
||||||
|
options.push("--tags", item);
|
||||||
|
});
|
||||||
|
options.push(volume_path);
|
||||||
|
SNAPSHOTS_CUT_IN_FLIGHT.add(name);
|
||||||
|
response = await kopia.snapshotCreate(options).finally(() => {
|
||||||
|
SNAPSHOTS_CUT_IN_FLIGHT.delete(name);
|
||||||
|
});
|
||||||
|
|
||||||
|
snapshot_id = response.id;
|
||||||
|
snapshot_date = new Date(response.startTime); // maybe use endTime?
|
||||||
|
let snapshot_end_date = new Date(response.endTime);
|
||||||
|
let total_duration =
|
||||||
|
Math.abs(snapshot_end_date.getTime() - snapshot_date.getTime()) /
|
||||||
|
1000;
|
||||||
|
size_bytes = response.rootEntry.summ.size;
|
||||||
|
|
||||||
|
driver.ctx.logger.info(
|
||||||
|
`kopia backup finished: snapshot_id=${snapshot_id}, path=${volume_path}, total_duration=${
|
||||||
|
total_duration | 0
|
||||||
|
}s`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`unknown snapthot driver: ${snapshot_driver}`
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
let size_bytes = await driver.getDirectoryUsage(snapshot_path);
|
|
||||||
return {
|
return {
|
||||||
snapshot: {
|
snapshot: {
|
||||||
/**
|
/**
|
||||||
|
|
@ -784,14 +1284,17 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
* is needed to create a volume from this snapshot.
|
* is needed to create a volume from this snapshot.
|
||||||
*/
|
*/
|
||||||
size_bytes,
|
size_bytes,
|
||||||
snapshot_id,
|
snapshot_id: new URLSearchParams({
|
||||||
|
snapshot_driver,
|
||||||
|
snapshot_id,
|
||||||
|
}).toString(),
|
||||||
source_volume_id: source_volume_id,
|
source_volume_id: source_volume_id,
|
||||||
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
|
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
|
||||||
creation_time: {
|
creation_time: {
|
||||||
seconds: Math.round(new Date().getTime() / 1000),
|
seconds: Math.round(snapshot_date.getTime() / 1000),
|
||||||
nanos: 0,
|
nanos: 0,
|
||||||
},
|
},
|
||||||
ready_to_use: true,
|
ready_to_use,
|
||||||
},
|
},
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
@ -805,7 +1308,11 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
async DeleteSnapshot(call) {
|
async DeleteSnapshot(call) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
|
|
||||||
const snapshot_id = call.request.snapshot_id;
|
let snapshot_id = call.request.snapshot_id;
|
||||||
|
let snapshot_driver;
|
||||||
|
const config_key = driver.getConfigKey();
|
||||||
|
const instance_id = driver.options.instance_id;
|
||||||
|
let response;
|
||||||
|
|
||||||
if (!snapshot_id) {
|
if (!snapshot_id) {
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
|
|
@ -814,8 +1321,70 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
|
// get parsed variant of driver to allow snapshotter to work with all
|
||||||
await driver.deleteDir(snapshot_path);
|
// drivers simultaneously
|
||||||
|
const parsed_snapshot_id = new URLSearchParams(snapshot_id);
|
||||||
|
if (parsed_snapshot_id.get("snapshot_driver")) {
|
||||||
|
snapshot_id = parsed_snapshot_id.get("snapshot_id");
|
||||||
|
snapshot_driver = parsed_snapshot_id.get("snapshot_driver");
|
||||||
|
} else {
|
||||||
|
snapshot_driver = "filecopy";
|
||||||
|
}
|
||||||
|
|
||||||
|
switch (snapshot_driver) {
|
||||||
|
case "filecopy":
|
||||||
|
{
|
||||||
|
const snapshot_path = driver.getControllerSnapshotPath(snapshot_id);
|
||||||
|
await driver.deleteDir(snapshot_path);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case "restic":
|
||||||
|
{
|
||||||
|
let prune = _.get(
|
||||||
|
driver.options[config_key],
|
||||||
|
"snapshots.restic.prune",
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
if (typeof prune != "boolean") {
|
||||||
|
prune = String(prune);
|
||||||
|
if (["true", "yes", "1"].includes(prune.toLowerCase())) {
|
||||||
|
prune = true;
|
||||||
|
} else {
|
||||||
|
prune = false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const restic = await driver.getResticClient();
|
||||||
|
|
||||||
|
let options = [];
|
||||||
|
await restic.init();
|
||||||
|
|
||||||
|
// we preempt with this check to prevent locking the repo when snapshot does not exist
|
||||||
|
const snapshot_exists = await restic.snapshot_exists(snapshot_id);
|
||||||
|
if (snapshot_exists) {
|
||||||
|
options = [];
|
||||||
|
if (prune) {
|
||||||
|
options.push("--prune");
|
||||||
|
}
|
||||||
|
options.push(snapshot_id);
|
||||||
|
await restic.forget(options);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
case "kopia":
|
||||||
|
{
|
||||||
|
const kopia = await driver.getKopiaClient();
|
||||||
|
let options = [snapshot_id];
|
||||||
|
await kopia.snapshotDelete(options);
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`unknown snapthot driver: ${snapshot_driver}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -44,12 +44,11 @@ class ControllerLocalHostpathDriver extends ControllerClientCommonDriver {
|
||||||
return "local-hostpath";
|
return "local-hostpath";
|
||||||
}
|
}
|
||||||
|
|
||||||
getVolumeContext(name) {
|
getVolumeContext(volume_id) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const config_key = driver.getConfigKey();
|
|
||||||
return {
|
return {
|
||||||
node_attach_driver: "hostpath",
|
node_attach_driver: "hostpath",
|
||||||
path: driver.getShareVolumePath(name),
|
path: driver.getShareVolumePath(volume_id),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,13 +13,13 @@ class ControllerLustreClientDriver extends ControllerClientCommonDriver {
|
||||||
return "lustre";
|
return "lustre";
|
||||||
}
|
}
|
||||||
|
|
||||||
getVolumeContext(name) {
|
getVolumeContext(volume_id) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const config_key = driver.getConfigKey();
|
const config_key = driver.getConfigKey();
|
||||||
return {
|
return {
|
||||||
node_attach_driver: "lustre",
|
node_attach_driver: "lustre",
|
||||||
server: this.options[config_key].shareHost,
|
server: this.options[config_key].shareHost,
|
||||||
share: driver.getShareVolumePath(name),
|
share: driver.getShareVolumePath(volume_id),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -13,13 +13,13 @@ class ControllerNfsClientDriver extends ControllerClientCommonDriver {
|
||||||
return "nfs";
|
return "nfs";
|
||||||
}
|
}
|
||||||
|
|
||||||
getVolumeContext(name) {
|
getVolumeContext(volume_id) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const config_key = driver.getConfigKey();
|
const config_key = driver.getConfigKey();
|
||||||
return {
|
return {
|
||||||
node_attach_driver: "nfs",
|
node_attach_driver: "nfs",
|
||||||
server: this.options[config_key].shareHost,
|
server: this.options[config_key].shareHost,
|
||||||
share: driver.getShareVolumePath(name),
|
share: driver.getShareVolumePath(volume_id),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,670 @@
|
||||||
|
const _ = require("lodash");
|
||||||
|
const { CsiBaseDriver } = require("../index");
|
||||||
|
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
|
const GeneralUtils = require("../../utils/general");
|
||||||
|
const { ObjectiveFS } = require("../../utils/objectivefs");
|
||||||
|
const semver = require("semver");
|
||||||
|
const uuidv4 = require("uuid").v4;
|
||||||
|
|
||||||
|
// Registry namespace for this driver's cached singletons (clients, etc).
// Must be unique per driver class: the original value was
// "ControllerZfsLocalDriver" (copy-paste), which would collide with the
// zfs-local driver's registry entries when both drivers are loaded.
const __REGISTRY_NS__ = "ControllerObjectiveFSDriver";
// objectivefs enforces a limit on filesystem name length
const MAX_VOLUME_NAME_LENGTH = 63;
|
||||||
|
|
||||||
|
class ControllerObjectiveFSDriver extends CsiBaseDriver {
|
||||||
|
constructor(ctx, options) {
|
||||||
|
super(...arguments);
|
||||||
|
|
||||||
|
options = options || {};
|
||||||
|
options.service = options.service || {};
|
||||||
|
options.service.identity = options.service.identity || {};
|
||||||
|
options.service.controller = options.service.controller || {};
|
||||||
|
options.service.node = options.service.node || {};
|
||||||
|
|
||||||
|
options.service.identity.capabilities =
|
||||||
|
options.service.identity.capabilities || {};
|
||||||
|
|
||||||
|
options.service.controller.capabilities =
|
||||||
|
options.service.controller.capabilities || {};
|
||||||
|
|
||||||
|
options.service.node.capabilities = options.service.node.capabilities || {};
|
||||||
|
|
||||||
|
if (!("service" in options.service.identity.capabilities)) {
|
||||||
|
this.ctx.logger.debug("setting default identity service caps");
|
||||||
|
|
||||||
|
options.service.identity.capabilities.service = [
|
||||||
|
//"UNKNOWN",
|
||||||
|
"CONTROLLER_SERVICE",
|
||||||
|
//"VOLUME_ACCESSIBILITY_CONSTRAINTS"
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!("volume_expansion" in options.service.identity.capabilities)) {
|
||||||
|
this.ctx.logger.debug("setting default identity volume_expansion caps");
|
||||||
|
|
||||||
|
options.service.identity.capabilities.volume_expansion = [
|
||||||
|
//"UNKNOWN",
|
||||||
|
//"ONLINE",
|
||||||
|
//"OFFLINE"
|
||||||
|
];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!("rpc" in options.service.controller.capabilities)) {
|
||||||
|
this.ctx.logger.debug("setting default controller caps");
|
||||||
|
|
||||||
|
options.service.controller.capabilities.rpc = [
|
||||||
|
//"UNKNOWN",
|
||||||
|
"CREATE_DELETE_VOLUME",
|
||||||
|
//"PUBLISH_UNPUBLISH_VOLUME",
|
||||||
|
"LIST_VOLUMES",
|
||||||
|
//"GET_CAPACITY",
|
||||||
|
//"CREATE_DELETE_SNAPSHOT",
|
||||||
|
//"LIST_SNAPSHOTS",
|
||||||
|
//"CLONE_VOLUME",
|
||||||
|
//"PUBLISH_READONLY",
|
||||||
|
//"EXPAND_VOLUME",
|
||||||
|
];
|
||||||
|
|
||||||
|
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
|
||||||
|
options.service.controller.capabilities.rpc
|
||||||
|
.push
|
||||||
|
//"VOLUME_CONDITION",
|
||||||
|
//"GET_VOLUME"
|
||||||
|
();
|
||||||
|
}
|
||||||
|
|
||||||
|
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
|
||||||
|
options.service.controller.capabilities.rpc.push(
|
||||||
|
"SINGLE_NODE_MULTI_WRITER"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!("rpc" in options.service.node.capabilities)) {
|
||||||
|
this.ctx.logger.debug("setting default node caps");
|
||||||
|
|
||||||
|
options.service.node.capabilities.rpc = [
|
||||||
|
//"UNKNOWN",
|
||||||
|
"STAGE_UNSTAGE_VOLUME",
|
||||||
|
"GET_VOLUME_STATS",
|
||||||
|
//"EXPAND_VOLUME"
|
||||||
|
];
|
||||||
|
|
||||||
|
if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
|
||||||
|
//options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
|
||||||
|
}
|
||||||
|
|
||||||
|
if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
|
||||||
|
options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
|
||||||
|
/**
|
||||||
|
* This is for volumes that support a mount time gid such as smb or fat
|
||||||
|
*/
|
||||||
|
//options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async getObjectiveFSClient() {
|
||||||
|
const driver = this;
|
||||||
|
return this.ctx.registry.getAsync(
|
||||||
|
`${__REGISTRY_NS__}:objectivefsclient`,
|
||||||
|
async () => {
|
||||||
|
const options = {};
|
||||||
|
options.sudo = _.get(
|
||||||
|
driver.options,
|
||||||
|
"objectivefs.cli.sudoEnabled",
|
||||||
|
false
|
||||||
|
);
|
||||||
|
|
||||||
|
options.pool = _.get(driver.options, "objectivefs.pool");
|
||||||
|
|
||||||
|
return new ObjectiveFS({
|
||||||
|
...options,
|
||||||
|
env: _.get(driver.options, "objectivefs.env", {}),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* @returns Array
|
||||||
|
*/
|
||||||
|
getAccessModes(capability) {
|
||||||
|
let access_modes = _.get(this.options, "csi.access_modes", null);
|
||||||
|
if (access_modes !== null) {
|
||||||
|
return access_modes;
|
||||||
|
}
|
||||||
|
|
||||||
|
access_modes = [
|
||||||
|
"UNKNOWN",
|
||||||
|
"SINGLE_NODE_WRITER",
|
||||||
|
"SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
|
||||||
|
"SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
|
||||||
|
"SINGLE_NODE_READER_ONLY",
|
||||||
|
"MULTI_NODE_READER_ONLY",
|
||||||
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
|
"MULTI_NODE_MULTI_WRITER",
|
||||||
|
];
|
||||||
|
|
||||||
|
if (
|
||||||
|
capability.access_type == "block" &&
|
||||||
|
!access_modes.includes("MULTI_NODE_MULTI_WRITER")
|
||||||
|
) {
|
||||||
|
access_modes.push("MULTI_NODE_MULTI_WRITER");
|
||||||
|
}
|
||||||
|
|
||||||
|
return access_modes;
|
||||||
|
}
|
||||||
|
|
||||||
|
getFsTypes() {
|
||||||
|
return ["fuse.objectivefs", "objectivefs"];
|
||||||
|
}
|
||||||
|
|
||||||
|
assertCapabilities(capabilities) {
|
||||||
|
const driver = this;
|
||||||
|
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
|
||||||
|
|
||||||
|
let message = null;
|
||||||
|
let fs_types = driver.getFsTypes();
|
||||||
|
const valid = capabilities.every((capability) => {
|
||||||
|
if (capability.access_type != "mount") {
|
||||||
|
message = `invalid access_type ${capability.access_type}`;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
capability.mount.fs_type &&
|
||||||
|
!fs_types.includes(capability.mount.fs_type)
|
||||||
|
) {
|
||||||
|
message = `invalid fs_type ${capability.mount.fs_type}`;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (
|
||||||
|
!this.getAccessModes(capability).includes(capability.access_mode.mode)
|
||||||
|
) {
|
||||||
|
message = `invalid access_mode, ${capability.access_mode.mode}`;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
|
||||||
|
return { valid, message };
|
||||||
|
}
|
||||||
|
|
||||||
|
async getVolumeStatus(entry) {
|
||||||
|
const driver = this;
|
||||||
|
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
|
||||||
|
const volume_id = entry.NAME.replace(object_store, "").split("/")[1];
|
||||||
|
|
||||||
|
if (!!!semver.satisfies(driver.ctx.csiVersion, ">=1.2.0")) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let abnormal = false;
|
||||||
|
let message = "OK";
|
||||||
|
let volume_status = {};
|
||||||
|
|
||||||
|
//LIST_VOLUMES_PUBLISHED_NODES
|
||||||
|
if (
|
||||||
|
semver.satisfies(driver.ctx.csiVersion, ">=1.2.0") &&
|
||||||
|
driver.options.service.controller.capabilities.rpc.includes(
|
||||||
|
"LIST_VOLUMES_PUBLISHED_NODES"
|
||||||
|
)
|
||||||
|
) {
|
||||||
|
// TODO: let drivers fill this in
|
||||||
|
volume_status.published_node_ids = [];
|
||||||
|
}
|
||||||
|
|
||||||
|
//VOLUME_CONDITION
|
||||||
|
if (
|
||||||
|
semver.satisfies(driver.ctx.csiVersion, ">=1.3.0") &&
|
||||||
|
driver.options.service.controller.capabilities.rpc.includes(
|
||||||
|
"VOLUME_CONDITION"
|
||||||
|
)
|
||||||
|
) {
|
||||||
|
// TODO: let drivers fill ths in
|
||||||
|
volume_condition = { abnormal, message };
|
||||||
|
volume_status.volume_condition = volume_condition;
|
||||||
|
}
|
||||||
|
|
||||||
|
return volume_status;
|
||||||
|
}
|
||||||
|
|
||||||
|
async populateCsiVolumeFromData(entry) {
|
||||||
|
const driver = this;
|
||||||
|
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
|
||||||
|
let filesystem = entry.NAME.replace(object_store, "");
|
||||||
|
|
||||||
|
let volume_content_source;
|
||||||
|
let volume_context = {
|
||||||
|
provisioner_driver: driver.options.driver,
|
||||||
|
node_attach_driver: "objectivefs",
|
||||||
|
filesystem,
|
||||||
|
object_store,
|
||||||
|
"env.OBJECTSTORE": object_store,
|
||||||
|
};
|
||||||
|
|
||||||
|
if (driver.options.instance_id) {
|
||||||
|
volume_context["provisioner_driver_instance_id"] =
|
||||||
|
driver.options.instance_id;
|
||||||
|
}
|
||||||
|
let accessible_topology;
|
||||||
|
|
||||||
|
let volume = {
|
||||||
|
volume_id: filesystem.split("/")[1],
|
||||||
|
capacity_bytes: 0,
|
||||||
|
content_source: volume_content_source,
|
||||||
|
volume_context,
|
||||||
|
accessible_topology,
|
||||||
|
};
|
||||||
|
|
||||||
|
return volume;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Ensure sane options are used etc
|
||||||
|
* true = ready
|
||||||
|
* false = not ready, but progressiong towards ready
|
||||||
|
* throw error = faulty setup
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
async Probe(call) {
|
||||||
|
const driver = this;
|
||||||
|
const pool = _.get(driver.options, "objectivefs.pool");
|
||||||
|
const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
|
||||||
|
|
||||||
|
if (driver.ctx.args.csiMode.includes("controller")) {
|
||||||
|
if (!pool) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`objectivefs.pool not configured`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!object_store) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`env.OBJECTSTORE not configured`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return { ready: { value: true } };
|
||||||
|
} else {
|
||||||
|
return { ready: { value: true } };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create an objectivefs filesystem as a new volume
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
  async CreateVolume(call) {
    const driver = this;
    const ofsClient = await driver.getObjectiveFSClient();
    const pool = _.get(driver.options, "objectivefs.pool");
    const object_store = _.get(driver.options, "objectivefs.env.OBJECTSTORE");
    const parameters = call.request.parameters;

    // both pool and object store must be configured before any create
    if (!pool) {
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `objectivefs.pool not configured`
      );
    }

    if (!object_store) {
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `env.OBJECTSTORE not configured`
      );
    }

    // copy any env.* storage-class parameters into the volume_context so
    // the node service can reproduce the mount environment
    const context_env = {};
    for (const key in parameters) {
      if (key.startsWith("env.")) {
        context_env[key] = parameters[key];
      }
    }
    context_env["env.OBJECTSTORE"] = object_store;

    // filesystem names are always lower-cased by ofs
    let volume_id = await driver.getVolumeIdFromCall(call);
    let volume_content_source = call.request.volume_content_source;
    volume_id = volume_id.toLowerCase();
    const filesystem = `${pool}/${volume_id}`;

    if (volume_id.length >= MAX_VOLUME_NAME_LENGTH) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `derived volume_id ${volume_id} is too long for objectivefs`
      );
    }

    // capabilities are mandatory and must all be satisfiable by this driver
    if (
      call.request.volume_capabilities &&
      call.request.volume_capabilities.length > 0
    ) {
      const result = this.assertCapabilities(call.request.volume_capabilities);
      if (result.valid !== true) {
        throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
      }
    } else {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        "missing volume_capabilities"
      );
    }

    // capacity is meaningless for an objectivefs filesystem but the csi
    // contract still requires a sane capacity_range; default one in
    if (
      !call.request.capacity_range ||
      Object.keys(call.request.capacity_range).length === 0
    ) {
      call.request.capacity_range = {
        required_bytes: 1073741824, // meaningless
      };
    }

    // NOTE(review): "greather" typo in the message below — left as-is here
    if (
      call.request.capacity_range.required_bytes > 0 &&
      call.request.capacity_range.limit_bytes > 0 &&
      call.request.capacity_range.required_bytes >
        call.request.capacity_range.limit_bytes
    ) {
      throw new GrpcError(
        grpc.status.OUT_OF_RANGE,
        `required_bytes is greather than limit_bytes`
      );
    }

    let capacity_bytes =
      call.request.capacity_range.required_bytes ||
      call.request.capacity_range.limit_bytes;

    if (!capacity_bytes) {
      //should never happen, value must be set
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `volume capacity is required (either required_bytes or limit_bytes)`
      );
    }

    // ensure *actual* capacity is not greater than limit
    if (
      call.request.capacity_range.limit_bytes &&
      call.request.capacity_range.limit_bytes > 0 &&
      capacity_bytes > call.request.capacity_range.limit_bytes
    ) {
      throw new GrpcError(
        grpc.status.OUT_OF_RANGE,
        `required volume capacity is greater than limit`
      );
    }

    if (volume_content_source) {
      //should never happen, cannot clone with this driver
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `cloning is not enabled`
      );
    }

    // create the filesystem; -f skips the interactive confirmation
    await ofsClient.create({}, filesystem, ["-f"]);

    let volume_context = {
      provisioner_driver: driver.options.driver,
      node_attach_driver: "objectivefs",
      filesystem,
      ...context_env,
    };

    if (driver.options.instance_id) {
      volume_context["provisioner_driver_instance_id"] =
        driver.options.instance_id;
    }

    const res = {
      volume: {
        volume_id,
        //capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
        capacity_bytes: 0,
        content_source: volume_content_source,
        volume_context,
      },
    };

    return res;
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Delete a volume
|
||||||
|
*
|
||||||
|
* Deleting a volume consists of the following steps:
|
||||||
|
* 1. delete directory
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
async DeleteVolume(call) {
|
||||||
|
const driver = this;
|
||||||
|
const ofsClient = await driver.getObjectiveFSClient();
|
||||||
|
const pool = _.get(driver.options, "objectivefs.pool");
|
||||||
|
|
||||||
|
let volume_id = call.request.volume_id;
|
||||||
|
if (!volume_id) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`volume_id is required`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// deleteStrategy
|
||||||
|
const delete_strategy = _.get(
|
||||||
|
driver.options,
|
||||||
|
"_private.csi.volume.deleteStrategy",
|
||||||
|
""
|
||||||
|
);
|
||||||
|
|
||||||
|
if (delete_strategy == "retain") {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
volume_id = volume_id.toLowerCase();
|
||||||
|
const filesystem = `${pool}/${volume_id}`;
|
||||||
|
await ofsClient.destroy({}, filesystem, []);
|
||||||
|
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
  async ControllerExpandVolume(call) {
    // objectivefs filesystems have no fixed size, so controller-side
    // expansion is not applicable; fail loudly rather than pretend to resize
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* TODO: consider volume_capabilities?
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
  async GetCapacity(call) {
    // capacity of the backing object store cannot be meaningfully reported;
    // the GET_CAPACITY rpc capability is not advertised (see constructor)
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* TODO: check capability to ensure not asking about block volumes
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
async ListVolumes(call) {
|
||||||
|
const driver = this;
|
||||||
|
const ofsClient = await driver.getObjectiveFSClient();
|
||||||
|
const pool = _.get(driver.options, "objectivefs.pool");
|
||||||
|
|
||||||
|
let entries = [];
|
||||||
|
let entries_length = 0;
|
||||||
|
let next_token;
|
||||||
|
let uuid;
|
||||||
|
let response;
|
||||||
|
|
||||||
|
const max_entries = call.request.max_entries;
|
||||||
|
const starting_token = call.request.starting_token;
|
||||||
|
|
||||||
|
// get data from cache and return immediately
|
||||||
|
if (starting_token) {
|
||||||
|
let parts = starting_token.split(":");
|
||||||
|
uuid = parts[0];
|
||||||
|
let start_position = parseInt(parts[1]);
|
||||||
|
let end_position;
|
||||||
|
if (max_entries > 0) {
|
||||||
|
end_position = start_position + max_entries;
|
||||||
|
}
|
||||||
|
entries = this.ctx.cache.get(`ListVolumes:result:${uuid}`);
|
||||||
|
if (entries) {
|
||||||
|
entries_length = entries.length;
|
||||||
|
entries = entries.slice(start_position, end_position);
|
||||||
|
if (max_entries > 0 && end_position > entries_length) {
|
||||||
|
next_token = `${uuid}:${end_position}`;
|
||||||
|
} else {
|
||||||
|
next_token = null;
|
||||||
|
}
|
||||||
|
const data = {
|
||||||
|
entries: entries,
|
||||||
|
next_token: next_token,
|
||||||
|
};
|
||||||
|
|
||||||
|
return data;
|
||||||
|
} else {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.ABORTED,
|
||||||
|
`invalid starting_token: ${starting_token}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
entries = [];
|
||||||
|
const list_entries = await ofsClient.list({});
|
||||||
|
for (const entry of list_entries) {
|
||||||
|
if (entry.KIND != "ofs") {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let volume = await driver.populateCsiVolumeFromData(entry);
|
||||||
|
if (volume) {
|
||||||
|
let status = await driver.getVolumeStatus(entry);
|
||||||
|
entries.push({
|
||||||
|
volume,
|
||||||
|
status,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (max_entries && entries.length > max_entries) {
|
||||||
|
uuid = uuidv4();
|
||||||
|
this.ctx.cache.set(`ListVolumes:result:${uuid}`, entries);
|
||||||
|
next_token = `${uuid}:${max_entries}`;
|
||||||
|
entries = entries.slice(0, max_entries);
|
||||||
|
}
|
||||||
|
|
||||||
|
const data = {
|
||||||
|
entries: entries,
|
||||||
|
next_token: next_token,
|
||||||
|
};
|
||||||
|
|
||||||
|
return data;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
  async ListSnapshots(call) {
    // snapshot rpc capabilities are not advertised (see constructor)
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
  async CreateSnapshot(call) {
    // snapshot rpc capabilities are not advertised (see constructor)
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
* In addition, if clones have been created from a snapshot, then they must
|
||||||
|
* be destroyed before the snapshot can be destroyed.
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
  async DeleteSnapshot(call) {
    // snapshot rpc capabilities are not advertised (see constructor)
    throw new GrpcError(
      grpc.status.UNIMPLEMENTED,
      `operation not supported by driver`
    );
  }
|
||||||
|
|
||||||
|
/**
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
async ValidateVolumeCapabilities(call) {
|
||||||
|
const driver = this;
|
||||||
|
const ofsClient = await driver.getObjectiveFSClient();
|
||||||
|
const pool = _.get(driver.options, "objectivefs.pool");
|
||||||
|
|
||||||
|
const volume_id = call.request.volume_id;
|
||||||
|
if (!volume_id) {
|
||||||
|
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing volume_id`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const filesystem = `${pool}/${volume_id}`;
|
||||||
|
const entries = await ofsClient.list({}, filesystem);
|
||||||
|
const exists = entries.some((entry) => {
|
||||||
|
return entry.NAME.endsWith(filesystem) && entry.KIND == "ofs";
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!exists) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.NOT_FOUND,
|
||||||
|
`invalid volume_id: ${volume_id}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const capabilities = call.request.volume_capabilities;
|
||||||
|
if (!capabilities || capabilities.length === 0) {
|
||||||
|
throw new GrpcError(grpc.status.INVALID_ARGUMENT, `missing capabilities`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = this.assertCapabilities(call.request.volume_capabilities);
|
||||||
|
|
||||||
|
if (result.valid !== true) {
|
||||||
|
return { message: result.message };
|
||||||
|
}
|
||||||
|
|
||||||
|
return {
|
||||||
|
confirmed: {
|
||||||
|
volume_context: call.request.volume_context,
|
||||||
|
volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
|
||||||
|
parameters: call.request.parameters,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports.ControllerObjectiveFSDriver = ControllerObjectiveFSDriver;
|
||||||
|
|
@ -13,13 +13,13 @@ class ControllerSmbClientDriver extends ControllerClientCommonDriver {
|
||||||
return "smb";
|
return "smb";
|
||||||
}
|
}
|
||||||
|
|
||||||
getVolumeContext(name) {
|
getVolumeContext(volume_id) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const config_key = driver.getConfigKey();
|
const config_key = driver.getConfigKey();
|
||||||
return {
|
return {
|
||||||
node_attach_driver: "smb",
|
node_attach_driver: "smb",
|
||||||
server: this.options[config_key].shareHost,
|
server: this.options[config_key].shareHost,
|
||||||
share: driver.stripLeadingSlash(driver.getShareVolumePath(name)),
|
share: driver.stripLeadingSlash(driver.getShareVolumePath(volume_id)),
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@ const http = require("http");
|
||||||
const https = require("https");
|
const https = require("https");
|
||||||
const { axios_request, stringify } = require("../../../utils/general");
|
const { axios_request, stringify } = require("../../../utils/general");
|
||||||
const Mutex = require("async-mutex").Mutex;
|
const Mutex = require("async-mutex").Mutex;
|
||||||
const registry = require("../../../utils/registry");
|
|
||||||
const { GrpcError, grpc } = require("../../../utils/grpc");
|
const { GrpcError, grpc } = require("../../../utils/grpc");
|
||||||
|
|
||||||
const USER_AGENT = "democratic-csi";
|
const USER_AGENT = "democratic-csi";
|
||||||
|
|
@ -95,7 +94,7 @@ class SynologyHttpClient {
|
||||||
}
|
}
|
||||||
|
|
||||||
getHttpAgent() {
|
getHttpAgent() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:http_agent`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_agent`, () => {
|
||||||
return new http.Agent({
|
return new http.Agent({
|
||||||
keepAlive: true,
|
keepAlive: true,
|
||||||
maxSockets: Infinity,
|
maxSockets: Infinity,
|
||||||
|
|
@ -105,7 +104,7 @@ class SynologyHttpClient {
|
||||||
}
|
}
|
||||||
|
|
||||||
getHttpsAgent() {
|
getHttpsAgent() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:https_agent`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:https_agent`, () => {
|
||||||
return new https.Agent({
|
return new https.Agent({
|
||||||
keepAlive: true,
|
keepAlive: true,
|
||||||
maxSockets: Infinity,
|
maxSockets: Infinity,
|
||||||
|
|
|
||||||
|
|
@ -3,7 +3,6 @@ const { CsiBaseDriver } = require("../index");
|
||||||
const GeneralUtils = require("../../utils/general");
|
const GeneralUtils = require("../../utils/general");
|
||||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
const Handlebars = require("handlebars");
|
const Handlebars = require("handlebars");
|
||||||
const registry = require("../../utils/registry");
|
|
||||||
const SynologyHttpClient = require("./http").SynologyHttpClient;
|
const SynologyHttpClient = require("./http").SynologyHttpClient;
|
||||||
const semver = require("semver");
|
const semver = require("semver");
|
||||||
const yaml = require("js-yaml");
|
const yaml = require("js-yaml");
|
||||||
|
|
@ -115,7 +114,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getHttpClient() {
|
async getHttpClient() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:http_client`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_client`, () => {
|
||||||
return new SynologyHttpClient(this.options.httpConnection);
|
return new SynologyHttpClient(this.options.httpConnection);
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
@ -176,8 +175,8 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
buildIscsiName(name) {
|
buildIscsiName(volume_id) {
|
||||||
let iscsiName = name;
|
let iscsiName = volume_id;
|
||||||
if (this.options.iscsi.namePrefix) {
|
if (this.options.iscsi.namePrefix) {
|
||||||
iscsiName = this.options.iscsi.namePrefix + iscsiName;
|
iscsiName = this.options.iscsi.namePrefix + iscsiName;
|
||||||
}
|
}
|
||||||
|
|
@ -227,6 +226,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
"MULTI_NODE_MULTI_WRITER",
|
"MULTI_NODE_MULTI_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
case "volume":
|
case "volume":
|
||||||
access_modes = [
|
access_modes = [
|
||||||
"UNKNOWN",
|
"UNKNOWN",
|
||||||
|
|
@ -237,6 +237,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
"MULTI_NODE_READER_ONLY",
|
"MULTI_NODE_READER_ONLY",
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (
|
if (
|
||||||
|
|
@ -322,16 +323,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const httpClient = await driver.getHttpClient();
|
const httpClient = await driver.getHttpClient();
|
||||||
|
|
||||||
let name = call.request.name;
|
let volume_id = await driver.getVolumeIdFromCall(call);
|
||||||
let volume_content_source = call.request.volume_content_source;
|
let volume_content_source = call.request.volume_content_source;
|
||||||
|
|
||||||
if (!name) {
|
|
||||||
throw new GrpcError(
|
|
||||||
grpc.status.INVALID_ARGUMENT,
|
|
||||||
`volume name is required`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
call.request.volume_capabilities &&
|
call.request.volume_capabilities &&
|
||||||
call.request.volume_capabilities.length > 0
|
call.request.volume_capabilities.length > 0
|
||||||
|
|
@ -412,7 +406,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
break;
|
break;
|
||||||
case "iscsi":
|
case "iscsi":
|
||||||
let iscsiName = driver.buildIscsiName(name);
|
let iscsiName = driver.buildIscsiName(volume_id);
|
||||||
let lunTemplate;
|
let lunTemplate;
|
||||||
let targetTemplate;
|
let targetTemplate;
|
||||||
let data;
|
let data;
|
||||||
|
|
@ -668,7 +662,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
const res = {
|
const res = {
|
||||||
volume: {
|
volume: {
|
||||||
volume_id: name,
|
volume_id,
|
||||||
capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
|
capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
|
||||||
content_source: volume_content_source,
|
content_source: volume_content_source,
|
||||||
volume_context,
|
volume_context,
|
||||||
|
|
@ -687,15 +681,26 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const httpClient = await driver.getHttpClient();
|
const httpClient = await driver.getHttpClient();
|
||||||
|
|
||||||
let name = call.request.volume_id;
|
let volume_id = call.request.volume_id;
|
||||||
|
|
||||||
if (!name) {
|
if (!volume_id) {
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
grpc.status.INVALID_ARGUMENT,
|
grpc.status.INVALID_ARGUMENT,
|
||||||
`volume_id is required`
|
`volume_id is required`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// deleteStrategy
|
||||||
|
const delete_strategy = _.get(
|
||||||
|
driver.options,
|
||||||
|
"_private.csi.volume.deleteStrategy",
|
||||||
|
""
|
||||||
|
);
|
||||||
|
|
||||||
|
if (delete_strategy == "retain") {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
let response;
|
let response;
|
||||||
|
|
||||||
switch (driver.getDriverShareType()) {
|
switch (driver.getDriverShareType()) {
|
||||||
|
|
@ -716,7 +721,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
case "iscsi":
|
case "iscsi":
|
||||||
//await httpClient.DeleteAllLuns();
|
//await httpClient.DeleteAllLuns();
|
||||||
|
|
||||||
let iscsiName = driver.buildIscsiName(name);
|
let iscsiName = driver.buildIscsiName(volume_id);
|
||||||
let iqn = driver.options.iscsi.baseiqn + iscsiName;
|
let iqn = driver.options.iscsi.baseiqn + iscsiName;
|
||||||
|
|
||||||
let target = await httpClient.GetTargetByIQN(iqn);
|
let target = await httpClient.GetTargetByIQN(iqn);
|
||||||
|
|
@ -784,9 +789,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const httpClient = await driver.getHttpClient();
|
const httpClient = await driver.getHttpClient();
|
||||||
|
|
||||||
let name = call.request.volume_id;
|
let volume_id = call.request.volume_id;
|
||||||
|
|
||||||
if (!name) {
|
if (!volume_id) {
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
grpc.status.INVALID_ARGUMENT,
|
grpc.status.INVALID_ARGUMENT,
|
||||||
`volume_id is required`
|
`volume_id is required`
|
||||||
|
|
@ -848,7 +853,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
||||||
break;
|
break;
|
||||||
case "iscsi":
|
case "iscsi":
|
||||||
node_expansion_required = true;
|
node_expansion_required = true;
|
||||||
let iscsiName = driver.buildIscsiName(name);
|
let iscsiName = driver.buildIscsiName(volume_id);
|
||||||
|
|
||||||
response = await httpClient.GetLunUUIDByName(iscsiName);
|
response = await httpClient.GetLunUUIDByName(iscsiName);
|
||||||
await httpClient.ExpandISCSILun(response, capacity_bytes);
|
await httpClient.ExpandISCSILun(response, capacity_bytes);
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,6 @@ const _ = require("lodash");
|
||||||
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
||||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
const GeneralUtils = require("../../utils/general");
|
const GeneralUtils = require("../../utils/general");
|
||||||
const registry = require("../../utils/registry");
|
|
||||||
const LocalCliExecClient =
|
const LocalCliExecClient =
|
||||||
require("../../utils/zfs_local_exec_client").LocalCliClient;
|
require("../../utils/zfs_local_exec_client").LocalCliClient;
|
||||||
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
||||||
|
|
@ -15,7 +14,7 @@ const NVMEOF_ASSETS_NAME_PROPERTY_NAME = "democratic-csi:nvmeof_assets_name";
|
||||||
const __REGISTRY_NS__ = "ControllerZfsGenericDriver";
|
const __REGISTRY_NS__ = "ControllerZfsGenericDriver";
|
||||||
class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
||||||
getExecClient() {
|
getExecClient() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
||||||
if (this.options.sshConnection) {
|
if (this.options.sshConnection) {
|
||||||
return new SshClient({
|
return new SshClient({
|
||||||
logger: this.ctx.logger,
|
logger: this.ctx.logger,
|
||||||
|
|
@ -30,7 +29,7 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getZetabyte() {
|
async getZetabyte() {
|
||||||
return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
|
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
|
||||||
const execClient = this.getExecClient();
|
const execClient = this.getExecClient();
|
||||||
const options = {};
|
const options = {};
|
||||||
if (this.options.sshConnection) {
|
if (this.options.sshConnection) {
|
||||||
|
|
@ -219,6 +218,22 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
||||||
basename = this.options.iscsi.shareStrategyTargetCli.basename;
|
basename = this.options.iscsi.shareStrategyTargetCli.basename;
|
||||||
let setAttributesText = "";
|
let setAttributesText = "";
|
||||||
let setAuthText = "";
|
let setAuthText = "";
|
||||||
|
let setBlockAttributesText = "";
|
||||||
|
|
||||||
|
if (this.options.iscsi.shareStrategyTargetCli.block) {
|
||||||
|
if (this.options.iscsi.shareStrategyTargetCli.block.attributes) {
|
||||||
|
for (const attributeName in this.options.iscsi
|
||||||
|
.shareStrategyTargetCli.block.attributes) {
|
||||||
|
const attributeValue =
|
||||||
|
this.options.iscsi.shareStrategyTargetCli.block.attributes[
|
||||||
|
attributeName
|
||||||
|
];
|
||||||
|
setBlockAttributesText += "\n";
|
||||||
|
setBlockAttributesText += `set attribute ${attributeName}=${attributeValue}`;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (this.options.iscsi.shareStrategyTargetCli.tpg) {
|
if (this.options.iscsi.shareStrategyTargetCli.tpg) {
|
||||||
if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) {
|
if (this.options.iscsi.shareStrategyTargetCli.tpg.attributes) {
|
||||||
for (const attributeName in this.options.iscsi
|
for (const attributeName in this.options.iscsi
|
||||||
|
|
@ -263,6 +278,8 @@ ${setAuthText}
|
||||||
# create extent
|
# create extent
|
||||||
cd /backstores/block
|
cd /backstores/block
|
||||||
create ${assetName} /dev/${extentDiskName}
|
create ${assetName} /dev/${extentDiskName}
|
||||||
|
cd /backstores/block/${assetName}
|
||||||
|
${setBlockAttributesText}
|
||||||
|
|
||||||
# add extent to target/tpg
|
# add extent to target/tpg
|
||||||
cd /iscsi/${basename}:${assetName}/tpg1/luns
|
cd /iscsi/${basename}:${assetName}/tpg1/luns
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,6 @@ const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
const GeneralUtils = require("../../utils/general");
|
const GeneralUtils = require("../../utils/general");
|
||||||
const LocalCliExecClient =
|
const LocalCliExecClient =
|
||||||
require("../../utils/zfs_local_exec_client").LocalCliClient;
|
require("../../utils/zfs_local_exec_client").LocalCliClient;
|
||||||
const registry = require("../../utils/registry");
|
|
||||||
const { Zetabyte } = require("../../utils/zfs");
|
const { Zetabyte } = require("../../utils/zfs");
|
||||||
|
|
||||||
const ZFS_ASSET_NAME_PROPERTY_NAME = "zfs_asset_name";
|
const ZFS_ASSET_NAME_PROPERTY_NAME = "zfs_asset_name";
|
||||||
|
|
@ -33,7 +32,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
getExecClient() {
|
getExecClient() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
||||||
return new LocalCliExecClient({
|
return new LocalCliExecClient({
|
||||||
logger: this.ctx.logger,
|
logger: this.ctx.logger,
|
||||||
});
|
});
|
||||||
|
|
@ -41,7 +40,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getZetabyte() {
|
async getZetabyte() {
|
||||||
return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
|
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
|
||||||
const execClient = this.getExecClient();
|
const execClient = this.getExecClient();
|
||||||
|
|
||||||
const options = {};
|
const options = {};
|
||||||
|
|
@ -129,6 +128,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
"MULTI_NODE_MULTI_WRITER",
|
"MULTI_NODE_MULTI_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
case "volume":
|
case "volume":
|
||||||
access_modes = [
|
access_modes = [
|
||||||
"UNKNOWN",
|
"UNKNOWN",
|
||||||
|
|
@ -140,6 +140,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
"MULTI_NODE_MULTI_WRITER",
|
"MULTI_NODE_MULTI_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (
|
if (
|
||||||
|
|
|
||||||
|
|
@ -226,6 +226,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
"MULTI_NODE_MULTI_WRITER",
|
"MULTI_NODE_MULTI_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
case "volume":
|
case "volume":
|
||||||
access_modes = [
|
access_modes = [
|
||||||
"UNKNOWN",
|
"UNKNOWN",
|
||||||
|
|
@ -236,6 +237,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
"MULTI_NODE_READER_ONLY",
|
"MULTI_NODE_READER_ONLY",
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (
|
if (
|
||||||
|
|
@ -615,9 +617,9 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
return { ready: { value: true } };
|
return super.Probe(...arguments);
|
||||||
} else {
|
} else {
|
||||||
return { ready: { value: true } };
|
return super.Probe(...arguments);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -642,7 +644,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
|
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
|
||||||
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
|
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
|
||||||
let name = call.request.name;
|
let name = call.request.name;
|
||||||
let volume_id = await driver.getVolumeIdFromName(name);
|
let volume_id = await driver.getVolumeIdFromCall(call);
|
||||||
let volume_content_source = call.request.volume_content_source;
|
let volume_content_source = call.request.volume_content_source;
|
||||||
|
|
||||||
if (!datasetParentName) {
|
if (!datasetParentName) {
|
||||||
|
|
@ -652,13 +654,6 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!name) {
|
|
||||||
throw new GrpcError(
|
|
||||||
grpc.status.INVALID_ARGUMENT,
|
|
||||||
`volume name is required`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
call.request.volume_capabilities &&
|
call.request.volume_capabilities &&
|
||||||
call.request.volume_capabilities.length > 0
|
call.request.volume_capabilities.length > 0
|
||||||
|
|
@ -1195,11 +1190,30 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
// this should be already set, but when coming from a volume source
|
// this should be already set, but when coming from a volume source
|
||||||
// it may not match that of the source
|
// it may not match that of the source
|
||||||
// TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured
|
|
||||||
properties.volsize = capacity_bytes;
|
properties.volsize = capacity_bytes;
|
||||||
|
|
||||||
//dedup
|
// dedup
|
||||||
//compression
|
// on, off, verify
|
||||||
|
// zfs set dedup=on tank/home
|
||||||
|
// restore default must use the below
|
||||||
|
// zfs inherit [-rS] property filesystem|volume|snapshot…
|
||||||
|
if (
|
||||||
|
(typeof this.options.zfs.zvolDedup === "string" ||
|
||||||
|
this.options.zfs.zvolDedup instanceof String) &&
|
||||||
|
this.options.zfs.zvolDedup.length > 0
|
||||||
|
) {
|
||||||
|
properties.dedup = this.options.zfs.zvolDedup;
|
||||||
|
}
|
||||||
|
|
||||||
|
// compression
|
||||||
|
// lz4, gzip-9, etc
|
||||||
|
if (
|
||||||
|
(typeof this.options.zfs.zvolCompression === "string" ||
|
||||||
|
this.options.zfs.zvolCompression instanceof String) &&
|
||||||
|
this.options.zfs.zvolCompression > 0
|
||||||
|
) {
|
||||||
|
properties.compression = this.options.zfs.zvolCompression;
|
||||||
|
}
|
||||||
|
|
||||||
if (setProps) {
|
if (setProps) {
|
||||||
await zb.zfs.set(datasetName, properties);
|
await zb.zfs.set(datasetName, properties);
|
||||||
|
|
@ -1302,6 +1316,17 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
driver.ctx.logger.debug("dataset properties: %j", properties);
|
driver.ctx.logger.debug("dataset properties: %j", properties);
|
||||||
|
|
||||||
|
// deleteStrategy
|
||||||
|
const delete_strategy = _.get(
|
||||||
|
driver.options,
|
||||||
|
"_private.csi.volume.deleteStrategy",
|
||||||
|
""
|
||||||
|
);
|
||||||
|
|
||||||
|
if (delete_strategy == "retain") {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
// remove share resources
|
// remove share resources
|
||||||
await this.deleteShare(call, datasetName);
|
await this.deleteShare(call, datasetName);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -12,6 +12,7 @@ const {
|
||||||
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
|
const { ControllerNfsClientDriver } = require("./controller-nfs-client");
|
||||||
const { ControllerSmbClientDriver } = require("./controller-smb-client");
|
const { ControllerSmbClientDriver } = require("./controller-smb-client");
|
||||||
const { ControllerLustreClientDriver } = require("./controller-lustre-client");
|
const { ControllerLustreClientDriver } = require("./controller-lustre-client");
|
||||||
|
const { ControllerObjectiveFSDriver } = require("./controller-objectivefs");
|
||||||
const { ControllerSynologyDriver } = require("./controller-synology");
|
const { ControllerSynologyDriver } = require("./controller-synology");
|
||||||
const { NodeManualDriver } = require("./node-manual");
|
const { NodeManualDriver } = require("./node-manual");
|
||||||
|
|
||||||
|
|
@ -50,6 +51,8 @@ function factory(ctx, options) {
|
||||||
return new ControllerLocalHostpathDriver(ctx, options);
|
return new ControllerLocalHostpathDriver(ctx, options);
|
||||||
case "lustre-client":
|
case "lustre-client":
|
||||||
return new ControllerLustreClientDriver(ctx, options);
|
return new ControllerLustreClientDriver(ctx, options);
|
||||||
|
case "objectivefs":
|
||||||
|
return new ControllerObjectiveFSDriver(ctx, options);
|
||||||
case "node-manual":
|
case "node-manual":
|
||||||
return new NodeManualDriver(ctx, options);
|
return new NodeManualDriver(ctx, options);
|
||||||
default:
|
default:
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,6 @@ const { CsiBaseDriver } = require("../index");
|
||||||
const HttpClient = require("./http").Client;
|
const HttpClient = require("./http").Client;
|
||||||
const TrueNASApiClient = require("./http/api").Api;
|
const TrueNASApiClient = require("./http/api").Api;
|
||||||
const { Zetabyte } = require("../../utils/zfs");
|
const { Zetabyte } = require("../../utils/zfs");
|
||||||
const registry = require("../../utils/registry");
|
|
||||||
const GeneralUtils = require("../../utils/general");
|
const GeneralUtils = require("../../utils/general");
|
||||||
|
|
||||||
const Handlebars = require("handlebars");
|
const Handlebars = require("handlebars");
|
||||||
|
|
@ -156,7 +155,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
* @returns
|
* @returns
|
||||||
*/
|
*/
|
||||||
async getZetabyte() {
|
async getZetabyte() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:zb`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
|
||||||
return new Zetabyte({
|
return new Zetabyte({
|
||||||
executor: {
|
executor: {
|
||||||
spawn: function () {
|
spawn: function () {
|
||||||
|
|
@ -183,8 +182,17 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
const apiVersion = httpClient.getApiVersion();
|
const apiVersion = httpClient.getApiVersion();
|
||||||
const zb = await this.getZetabyte();
|
const zb = await this.getZetabyte();
|
||||||
const truenasVersion = semver.coerce(
|
const truenasVersion = semver.coerce(
|
||||||
await httpApiClient.getSystemVersionMajorMinor()
|
await httpApiClient.getSystemVersionMajorMinor(),
|
||||||
|
{ loose: true }
|
||||||
);
|
);
|
||||||
|
|
||||||
|
if (!truenasVersion) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.UNKNOWN,
|
||||||
|
`unable to detect TrueNAS version`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
const isScale = await httpApiClient.getIsScale();
|
const isScale = await httpApiClient.getIsScale();
|
||||||
|
|
||||||
let volume_context;
|
let volume_context;
|
||||||
|
|
@ -265,6 +273,11 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (isScale && semver.satisfies(truenasVersion, ">=23.10")) {
|
||||||
|
delete share.quiet;
|
||||||
|
delete share.nfs_quiet;
|
||||||
|
}
|
||||||
|
|
||||||
if (isScale && semver.satisfies(truenasVersion, ">=22.12")) {
|
if (isScale && semver.satisfies(truenasVersion, ">=22.12")) {
|
||||||
share.path = share.paths[0];
|
share.path = share.paths[0];
|
||||||
delete share.paths;
|
delete share.paths;
|
||||||
|
|
@ -680,6 +693,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
// According to RFC3270, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.'
|
// According to RFC3270, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.'
|
||||||
// https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name
|
// https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name
|
||||||
// https://tools.ietf.org/html/rfc3720
|
// https://tools.ietf.org/html/rfc3720
|
||||||
|
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
iscsiName = iscsiName.toLowerCase();
|
iscsiName = iscsiName.toLowerCase();
|
||||||
|
|
||||||
let extentDiskName = "zvol/" + datasetName;
|
let extentDiskName = "zvol/" + datasetName;
|
||||||
|
|
@ -697,6 +711,14 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
|
if (isScale && iscsiName.length > 64) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`extent name cannot exceed 64 characters: ${iscsiName}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
this.ctx.logger.info(
|
this.ctx.logger.info(
|
||||||
"FreeNAS creating iscsi assets with name: " + iscsiName
|
"FreeNAS creating iscsi assets with name: " + iscsiName
|
||||||
);
|
);
|
||||||
|
|
@ -1994,7 +2016,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getHttpClient() {
|
async getHttpClient() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:http_client`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:http_client`, () => {
|
||||||
const client = new HttpClient(this.options.httpConnection);
|
const client = new HttpClient(this.options.httpConnection);
|
||||||
client.logger = this.ctx.logger;
|
client.logger = this.ctx.logger;
|
||||||
client.setApiVersion(2); // requires version 2
|
client.setApiVersion(2); // requires version 2
|
||||||
|
|
@ -2011,7 +2033,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getTrueNASHttpApiClient() {
|
async getTrueNASHttpApiClient() {
|
||||||
return registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
|
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
|
||||||
const httpClient = await this.getHttpClient();
|
const httpClient = await this.getHttpClient();
|
||||||
return new TrueNASApiClient(httpClient, this.ctx.cache);
|
return new TrueNASApiClient(httpClient, this.ctx.cache);
|
||||||
});
|
});
|
||||||
|
|
@ -2036,6 +2058,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
"MULTI_NODE_MULTI_WRITER",
|
"MULTI_NODE_MULTI_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
case "volume":
|
case "volume":
|
||||||
access_modes = [
|
access_modes = [
|
||||||
"UNKNOWN",
|
"UNKNOWN",
|
||||||
|
|
@ -2046,6 +2069,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
"MULTI_NODE_READER_ONLY",
|
"MULTI_NODE_READER_ONLY",
|
||||||
"MULTI_NODE_SINGLE_WRITER",
|
"MULTI_NODE_SINGLE_WRITER",
|
||||||
];
|
];
|
||||||
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (
|
if (
|
||||||
|
|
@ -2171,6 +2195,15 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
try {
|
||||||
|
await httpApiClient.getSystemVersion();
|
||||||
|
} catch (err) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`TrueNAS api is unavailable: ${String(err)}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
if (!(await httpApiClient.getIsScale())) {
|
if (!(await httpApiClient.getIsScale())) {
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
grpc.status.FAILED_PRECONDITION,
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
|
@ -2178,9 +2211,9 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
return { ready: { value: true } };
|
return super.Probe(...arguments);
|
||||||
} else {
|
} else {
|
||||||
return { ready: { value: true } };
|
return super.Probe(...arguments);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -2205,7 +2238,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
|
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
|
||||||
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
|
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
|
||||||
let name = call.request.name;
|
let name = call.request.name;
|
||||||
let volume_id = await driver.getVolumeIdFromName(name);
|
let volume_id = await driver.getVolumeIdFromCall(call);
|
||||||
let volume_content_source = call.request.volume_content_source;
|
let volume_content_source = call.request.volume_content_source;
|
||||||
let minimum_volume_size = await driver.getMinimumVolumeSize();
|
let minimum_volume_size = await driver.getMinimumVolumeSize();
|
||||||
let default_required_bytes = 1073741824;
|
let default_required_bytes = 1073741824;
|
||||||
|
|
@ -2217,13 +2250,6 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!name) {
|
|
||||||
throw new GrpcError(
|
|
||||||
grpc.status.INVALID_ARGUMENT,
|
|
||||||
`volume name is required`
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (
|
if (
|
||||||
call.request.volume_capabilities &&
|
call.request.volume_capabilities &&
|
||||||
call.request.volume_capabilities.length > 0
|
call.request.volume_capabilities.length > 0
|
||||||
|
|
@ -2899,11 +2925,30 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
// this should be already set, but when coming from a volume source
|
// this should be already set, but when coming from a volume source
|
||||||
// it may not match that of the source
|
// it may not match that of the source
|
||||||
// TODO: probably need to recalculate size based on *actual* volume source blocksize in case of difference from currently configured
|
|
||||||
properties.volsize = capacity_bytes;
|
properties.volsize = capacity_bytes;
|
||||||
|
|
||||||
//dedup
|
// dedup
|
||||||
//compression
|
// on, off, verify
|
||||||
|
// zfs set dedup=on tank/home
|
||||||
|
// restore default must use the below
|
||||||
|
// zfs inherit [-rS] property filesystem|volume|snapshot…
|
||||||
|
if (
|
||||||
|
(typeof this.options.zfs.zvolDedup === "string" ||
|
||||||
|
this.options.zfs.zvolDedup instanceof String) &&
|
||||||
|
this.options.zfs.zvolDedup.length > 0
|
||||||
|
) {
|
||||||
|
properties.dedup = this.options.zfs.zvolDedup;
|
||||||
|
}
|
||||||
|
|
||||||
|
// compression
|
||||||
|
// lz4, gzip-9, etc
|
||||||
|
if (
|
||||||
|
(typeof this.options.zfs.zvolCompression === "string" ||
|
||||||
|
this.options.zfs.zvolCompression instanceof String) &&
|
||||||
|
this.options.zfs.zvolCompression > 0
|
||||||
|
) {
|
||||||
|
properties.compression = this.options.zfs.zvolCompression;
|
||||||
|
}
|
||||||
|
|
||||||
if (setProps) {
|
if (setProps) {
|
||||||
await httpApiClient.DatasetSet(datasetName, properties);
|
await httpApiClient.DatasetSet(datasetName, properties);
|
||||||
|
|
@ -3002,6 +3047,17 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
||||||
|
|
||||||
driver.ctx.logger.debug("dataset properties: %j", properties);
|
driver.ctx.logger.debug("dataset properties: %j", properties);
|
||||||
|
|
||||||
|
// deleteStrategy
|
||||||
|
const delete_strategy = _.get(
|
||||||
|
driver.options,
|
||||||
|
"_private.csi.volume.deleteStrategy",
|
||||||
|
""
|
||||||
|
);
|
||||||
|
|
||||||
|
if (delete_strategy == "retain") {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
// remove share resources
|
// remove share resources
|
||||||
await this.deleteShare(call, datasetName);
|
await this.deleteShare(call, datasetName);
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,4 @@
|
||||||
const registry = require("../../../utils/registry");
|
|
||||||
const { sleep, stringify } = require("../../../utils/general");
|
const { sleep, stringify } = require("../../../utils/general");
|
||||||
const { Zetabyte } = require("../../../utils/zfs");
|
const { Zetabyte } = require("../../../utils/zfs");
|
||||||
|
|
||||||
|
|
@ -22,7 +22,7 @@ class Api {
|
||||||
* @returns
|
* @returns
|
||||||
*/
|
*/
|
||||||
async getZetabyte() {
|
async getZetabyte() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:zb`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
|
||||||
return new Zetabyte({
|
return new Zetabyte({
|
||||||
executor: {
|
executor: {
|
||||||
spawn: function () {
|
spawn: function () {
|
||||||
|
|
@ -119,7 +119,11 @@ class Api {
|
||||||
return 2;
|
return 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 1;
|
if (systemVersion.v1) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
async getIsFreeNAS() {
|
async getIsFreeNAS() {
|
||||||
|
|
@ -239,7 +243,7 @@ class Api {
|
||||||
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
|
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
|
||||||
*/
|
*/
|
||||||
try {
|
try {
|
||||||
response = await httpClient.get(endpoint);
|
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
|
||||||
versionResponses.v2 = response;
|
versionResponses.v2 = response;
|
||||||
if (response.statusCode == 200) {
|
if (response.statusCode == 200) {
|
||||||
versionInfo.v2 = response.body;
|
versionInfo.v2 = response.body;
|
||||||
|
|
@ -263,7 +267,7 @@ class Api {
|
||||||
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
|
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
|
||||||
*/
|
*/
|
||||||
try {
|
try {
|
||||||
response = await httpClient.get(endpoint);
|
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
|
||||||
versionResponses.v1 = response;
|
versionResponses.v1 = response;
|
||||||
if (response.statusCode == 200 && IsJsonString(response.body)) {
|
if (response.statusCode == 200 && IsJsonString(response.body)) {
|
||||||
versionInfo.v1 = response.body;
|
versionInfo.v1 = response.body;
|
||||||
|
|
@ -698,15 +702,16 @@ class Api {
|
||||||
|
|
||||||
// wait for job to finish
|
// wait for job to finish
|
||||||
do {
|
do {
|
||||||
|
currentTime = Date.now() / 1000;
|
||||||
|
if (timeout > 0 && currentTime > startTime + timeout) {
|
||||||
|
throw new Error("timeout waiting for job to complete");
|
||||||
|
}
|
||||||
|
|
||||||
if (job) {
|
if (job) {
|
||||||
await sleep(check_interval);
|
await sleep(check_interval);
|
||||||
}
|
}
|
||||||
job = await this.CoreGetJobs({ id: job_id });
|
job = await this.CoreGetJobs({ id: job_id });
|
||||||
job = job[0];
|
job = job[0];
|
||||||
currentTime = Date.now() / 1000;
|
|
||||||
if (timeout > 0 && currentTime > startTime + timeout) {
|
|
||||||
throw new Error("timeout waiting for job to complete");
|
|
||||||
}
|
|
||||||
} while (!["SUCCESS", "ABORTED", "FAILED"].includes(job.state));
|
} while (!["SUCCESS", "ABORTED", "FAILED"].includes(job.state));
|
||||||
|
|
||||||
return job;
|
return job;
|
||||||
|
|
|
||||||
|
|
@ -12,7 +12,7 @@ class Client {
|
||||||
|
|
||||||
// default to v1.0 for now
|
// default to v1.0 for now
|
||||||
if (!this.options.apiVersion) {
|
if (!this.options.apiVersion) {
|
||||||
this.options.apiVersion = 1;
|
this.options.apiVersion = 2;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -131,25 +131,33 @@ class Client {
|
||||||
delete options.httpAgent;
|
delete options.httpAgent;
|
||||||
delete options.httpsAgent;
|
delete options.httpsAgent;
|
||||||
|
|
||||||
this.logger.debug("FREENAS HTTP REQUEST: " + stringify(options));
|
let duration = parseFloat(
|
||||||
|
Math.round((_.get(response, "duration", 0) + Number.EPSILON) * 100) /
|
||||||
|
100 /
|
||||||
|
1000
|
||||||
|
).toFixed(2);
|
||||||
|
|
||||||
|
this.logger.debug("FREENAS HTTP REQUEST DETAILS: " + stringify(options));
|
||||||
|
this.logger.debug("FREENAS HTTP REQUEST DURATION: " + duration + "s");
|
||||||
this.logger.debug("FREENAS HTTP ERROR: " + error);
|
this.logger.debug("FREENAS HTTP ERROR: " + error);
|
||||||
this.logger.debug(
|
this.logger.debug(
|
||||||
"FREENAS HTTP STATUS: " + _.get(response, "statusCode", "")
|
"FREENAS HTTP RESPONSE STATUS CODE: " + _.get(response, "statusCode", "")
|
||||||
);
|
);
|
||||||
this.logger.debug(
|
this.logger.debug(
|
||||||
"FREENAS HTTP HEADERS: " + stringify(_.get(response, "headers", ""))
|
"FREENAS HTTP RESPONSE HEADERS: " +
|
||||||
|
stringify(_.get(response, "headers", ""))
|
||||||
);
|
);
|
||||||
this.logger.debug("FREENAS HTTP BODY: " + stringify(body));
|
this.logger.debug("FREENAS HTTP RESPONSE BODY: " + stringify(body));
|
||||||
}
|
}
|
||||||
|
|
||||||
async get(endpoint, data) {
|
async get(endpoint, data, options = {}) {
|
||||||
const client = this;
|
const client = this;
|
||||||
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
||||||
endpoint += "/";
|
endpoint += "/";
|
||||||
}
|
}
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const options = client.getRequestCommonOptions();
|
options = { ...client.getRequestCommonOptions(), ...options };
|
||||||
options.method = "GET";
|
options.method = "GET";
|
||||||
options.url = this.getBaseURL() + endpoint;
|
options.url = this.getBaseURL() + endpoint;
|
||||||
options.params = data;
|
options.params = data;
|
||||||
|
|
@ -164,14 +172,14 @@ class Client {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
async post(endpoint, data) {
|
async post(endpoint, data, options = {}) {
|
||||||
const client = this;
|
const client = this;
|
||||||
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
||||||
endpoint += "/";
|
endpoint += "/";
|
||||||
}
|
}
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const options = client.getRequestCommonOptions();
|
options = { ...client.getRequestCommonOptions(), ...options };
|
||||||
options.method = "POST";
|
options.method = "POST";
|
||||||
options.url = this.getBaseURL() + endpoint;
|
options.url = this.getBaseURL() + endpoint;
|
||||||
options.data = data;
|
options.data = data;
|
||||||
|
|
@ -187,14 +195,14 @@ class Client {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
async put(endpoint, data) {
|
async put(endpoint, data, options = {}) {
|
||||||
const client = this;
|
const client = this;
|
||||||
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
||||||
endpoint += "/";
|
endpoint += "/";
|
||||||
}
|
}
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const options = client.getRequestCommonOptions();
|
options = { ...client.getRequestCommonOptions(), ...options };
|
||||||
options.method = "PUT";
|
options.method = "PUT";
|
||||||
options.url = this.getBaseURL() + endpoint;
|
options.url = this.getBaseURL() + endpoint;
|
||||||
options.data = data;
|
options.data = data;
|
||||||
|
|
@ -210,14 +218,14 @@ class Client {
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
async delete(endpoint, data) {
|
async delete(endpoint, data, options = {}) {
|
||||||
const client = this;
|
const client = this;
|
||||||
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
if (this.options.apiVersion == 1 && !endpoint.endsWith("/")) {
|
||||||
endpoint += "/";
|
endpoint += "/";
|
||||||
}
|
}
|
||||||
|
|
||||||
return new Promise((resolve, reject) => {
|
return new Promise((resolve, reject) => {
|
||||||
const options = client.getRequestCommonOptions();
|
options = { ...client.getRequestCommonOptions(), ...options };
|
||||||
options.method = "DELETE";
|
options.method = "DELETE";
|
||||||
options.url = this.getBaseURL() + endpoint;
|
options.url = this.getBaseURL() + endpoint;
|
||||||
options.data = data;
|
options.data = data;
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,6 @@
|
||||||
const _ = require("lodash");
|
const _ = require("lodash");
|
||||||
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
||||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
const registry = require("../../utils/registry");
|
|
||||||
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
||||||
const HttpClient = require("./http").Client;
|
const HttpClient = require("./http").Client;
|
||||||
const TrueNASApiClient = require("./http/api").Api;
|
const TrueNASApiClient = require("./http/api").Api;
|
||||||
|
|
@ -28,8 +27,36 @@ const FREENAS_SYSTEM_VERSION_CACHE_KEY = "freenas:system_version";
|
||||||
const __REGISTRY_NS__ = "FreeNASSshDriver";
|
const __REGISTRY_NS__ = "FreeNASSshDriver";
|
||||||
|
|
||||||
class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
|
/**
|
||||||
|
* Ensure sane options are used etc
|
||||||
|
* true = ready
|
||||||
|
* false = not ready, but progressiong towards ready
|
||||||
|
* throw error = faulty setup
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
|
*/
|
||||||
|
async Probe(call) {
|
||||||
|
const driver = this;
|
||||||
|
|
||||||
|
if (driver.ctx.args.csiMode.includes("controller")) {
|
||||||
|
const httpApiClient = await driver.getTrueNASHttpApiClient();
|
||||||
|
try {
|
||||||
|
await httpApiClient.getSystemVersion();
|
||||||
|
} catch (err) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`TrueNAS api is unavailable: ${String(err)}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return super.Probe(...arguments);
|
||||||
|
} else {
|
||||||
|
return super.Probe(...arguments);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
getExecClient() {
|
getExecClient() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:exec_client`, () => {
|
||||||
return new SshClient({
|
return new SshClient({
|
||||||
logger: this.ctx.logger,
|
logger: this.ctx.logger,
|
||||||
connection: this.options.sshConnection,
|
connection: this.options.sshConnection,
|
||||||
|
|
@ -38,7 +65,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getZetabyte() {
|
async getZetabyte() {
|
||||||
return registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
|
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:zb`, async () => {
|
||||||
const sshClient = this.getExecClient();
|
const sshClient = this.getExecClient();
|
||||||
const options = {};
|
const options = {};
|
||||||
options.executor = new ZfsSshProcessManager(sshClient);
|
options.executor = new ZfsSshProcessManager(sshClient);
|
||||||
|
|
@ -98,7 +125,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
|
|
||||||
async getHttpClient(autoDetectVersion = true) {
|
async getHttpClient(autoDetectVersion = true) {
|
||||||
const autodetectkey = autoDetectVersion === true ? 1 : 0;
|
const autodetectkey = autoDetectVersion === true ? 1 : 0;
|
||||||
return registry.getAsync(
|
return this.ctx.registry.getAsync(
|
||||||
`${__REGISTRY_NS__}:http_client:autoDetectVersion_${autodetectkey}`,
|
`${__REGISTRY_NS__}:http_client:autoDetectVersion_${autodetectkey}`,
|
||||||
async () => {
|
async () => {
|
||||||
const client = new HttpClient(this.options.httpConnection);
|
const client = new HttpClient(this.options.httpConnection);
|
||||||
|
|
@ -115,7 +142,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
async getTrueNASHttpApiClient() {
|
async getTrueNASHttpApiClient() {
|
||||||
return registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
|
return this.ctx.registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
|
||||||
const httpClient = await this.getHttpClient();
|
const httpClient = await this.getHttpClient();
|
||||||
return new TrueNASApiClient(httpClient, this.ctx.cache);
|
return new TrueNASApiClient(httpClient, this.ctx.cache);
|
||||||
});
|
});
|
||||||
|
|
@ -231,8 +258,17 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
const apiVersion = httpClient.getApiVersion();
|
const apiVersion = httpClient.getApiVersion();
|
||||||
const zb = await this.getZetabyte();
|
const zb = await this.getZetabyte();
|
||||||
const truenasVersion = semver.coerce(
|
const truenasVersion = semver.coerce(
|
||||||
await httpApiClient.getSystemVersionMajorMinor()
|
await httpApiClient.getSystemVersionMajorMinor(),
|
||||||
|
{ loose: true }
|
||||||
);
|
);
|
||||||
|
|
||||||
|
if (!truenasVersion) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.UNKNOWN,
|
||||||
|
`unable to detect TrueNAS version`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
const isScale = await httpApiClient.getIsScale();
|
const isScale = await httpApiClient.getIsScale();
|
||||||
|
|
||||||
let volume_context;
|
let volume_context;
|
||||||
|
|
@ -314,6 +350,11 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (isScale && semver.satisfies(truenasVersion, ">=23.10")) {
|
||||||
|
delete share.quiet;
|
||||||
|
delete share.nfs_quiet;
|
||||||
|
}
|
||||||
|
|
||||||
if (isScale && semver.satisfies(truenasVersion, ">=22.12")) {
|
if (isScale && semver.satisfies(truenasVersion, ">=22.12")) {
|
||||||
share.path = share.paths[0];
|
share.path = share.paths[0];
|
||||||
delete share.paths;
|
delete share.paths;
|
||||||
|
|
@ -728,6 +769,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
// According to RFC3270, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.'
|
// According to RFC3270, 'Each iSCSI node, whether an initiator or target, MUST have an iSCSI name. Initiators and targets MUST support the receipt of iSCSI names of up to the maximum length of 223 bytes.'
|
||||||
// https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name
|
// https://kb.netapp.com/Advice_and_Troubleshooting/Miscellaneous/What_is_the_maximum_length_of_a_iSCSI_iqn_name
|
||||||
// https://tools.ietf.org/html/rfc3720
|
// https://tools.ietf.org/html/rfc3720
|
||||||
|
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
iscsiName = iscsiName.toLowerCase();
|
iscsiName = iscsiName.toLowerCase();
|
||||||
|
|
||||||
let extentDiskName = "zvol/" + datasetName;
|
let extentDiskName = "zvol/" + datasetName;
|
||||||
|
|
@ -742,7 +784,15 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
if (extentDiskName.length > maxZvolNameLength) {
|
if (extentDiskName.length > maxZvolNameLength) {
|
||||||
throw new GrpcError(
|
throw new GrpcError(
|
||||||
grpc.status.FAILED_PRECONDITION,
|
grpc.status.FAILED_PRECONDITION,
|
||||||
`extent disk name cannot exceed ${maxZvolNameLength} characters: ${extentDiskName}`
|
`extent disk name cannot exceed ${maxZvolNameLength} characters: ${extentDiskName}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
|
||||||
|
if (isScale && iscsiName.length > 64) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`extent name cannot exceed 64 characters: ${iscsiName}`
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -1983,6 +2033,9 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
let iscsiName =
|
let iscsiName =
|
||||||
properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
|
properties[FREENAS_ISCSI_ASSETS_NAME_PROPERTY_NAME].value;
|
||||||
|
|
||||||
|
// name correlates to the extent NOT the target
|
||||||
|
let kName = iscsiName.replaceAll(".", "_");
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* command = execClient.buildCommand("systemctl", ["reload", "scst"]);
|
* command = execClient.buildCommand("systemctl", ["reload", "scst"]);
|
||||||
* does not help ^
|
* does not help ^
|
||||||
|
|
@ -1995,10 +2048,11 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
*
|
*
|
||||||
* midclt resync_lun_size_for_zvol tank/foo/bar
|
* midclt resync_lun_size_for_zvol tank/foo/bar
|
||||||
* works on SCALE only ^
|
* works on SCALE only ^
|
||||||
|
*
|
||||||
*/
|
*/
|
||||||
command = execClient.buildCommand("sh", [
|
command = execClient.buildCommand("sh", [
|
||||||
"-c",
|
"-c",
|
||||||
`echo 1 > /sys/kernel/scst_tgt/devices/${iscsiName}/resync_size`,
|
`"echo 1 > /sys/kernel/scst_tgt/devices/${kName}/resync_size"`,
|
||||||
]);
|
]);
|
||||||
reload = true;
|
reload = true;
|
||||||
} else {
|
} else {
|
||||||
|
|
@ -2069,7 +2123,11 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
return 2;
|
return 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 1;
|
if (systemVersion.v1) {
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
return 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
async getIsFreeNAS() {
|
async getIsFreeNAS() {
|
||||||
|
|
@ -2194,7 +2252,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
|
* TrueNAS-SCALE-20.11-MASTER-20201127-092915
|
||||||
*/
|
*/
|
||||||
try {
|
try {
|
||||||
response = await httpClient.get(endpoint);
|
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
|
||||||
versionResponses.v2 = response;
|
versionResponses.v2 = response;
|
||||||
if (response.statusCode == 200) {
|
if (response.statusCode == 200) {
|
||||||
versionInfo.v2 = response.body;
|
versionInfo.v2 = response.body;
|
||||||
|
|
@ -2218,7 +2276,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
||||||
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
|
* {"fullversion": "FreeNAS-11.2-U5 (c129415c52)", "name": "FreeNAS", "version": ""}
|
||||||
*/
|
*/
|
||||||
try {
|
try {
|
||||||
response = await httpClient.get(endpoint);
|
response = await httpClient.get(endpoint, null, { timeout: 5 * 1000 });
|
||||||
versionResponses.v1 = response;
|
versionResponses.v1 = response;
|
||||||
if (response.statusCode == 200 && IsJsonString(response.body)) {
|
if (response.statusCode == 200 && IsJsonString(response.body)) {
|
||||||
versionInfo.v1 = response.body;
|
versionInfo.v1 = response.body;
|
||||||
|
|
|
||||||
|
|
@ -5,16 +5,16 @@ const fs = require("fs");
|
||||||
const CsiProxyClient = require("../utils/csi_proxy_client").CsiProxyClient;
|
const CsiProxyClient = require("../utils/csi_proxy_client").CsiProxyClient;
|
||||||
const k8s = require("@kubernetes/client-node");
|
const k8s = require("@kubernetes/client-node");
|
||||||
const { GrpcError, grpc } = require("../utils/grpc");
|
const { GrpcError, grpc } = require("../utils/grpc");
|
||||||
|
const Handlebars = require("handlebars");
|
||||||
const { Mount } = require("../utils/mount");
|
const { Mount } = require("../utils/mount");
|
||||||
|
const { ObjectiveFS } = require("../utils/objectivefs");
|
||||||
const { OneClient } = require("../utils/oneclient");
|
const { OneClient } = require("../utils/oneclient");
|
||||||
const { Filesystem } = require("../utils/filesystem");
|
const { Filesystem } = require("../utils/filesystem");
|
||||||
const { ISCSI } = require("../utils/iscsi");
|
const { ISCSI } = require("../utils/iscsi");
|
||||||
const { NVMEoF } = require("../utils/nvmeof");
|
const { NVMEoF } = require("../utils/nvmeof");
|
||||||
const registry = require("../utils/registry");
|
|
||||||
const semver = require("semver");
|
const semver = require("semver");
|
||||||
const GeneralUtils = require("../utils/general");
|
const GeneralUtils = require("../utils/general");
|
||||||
const { Zetabyte } = require("../utils/zfs");
|
const { Zetabyte } = require("../utils/zfs");
|
||||||
const { transport } = require("winston");
|
|
||||||
|
|
||||||
const __REGISTRY_NS__ = "CsiBaseDriver";
|
const __REGISTRY_NS__ = "CsiBaseDriver";
|
||||||
|
|
||||||
|
|
@ -110,7 +110,7 @@ class CsiBaseDriver {
|
||||||
* @returns Filesystem
|
* @returns Filesystem
|
||||||
*/
|
*/
|
||||||
getDefaultFilesystemInstance() {
|
getDefaultFilesystemInstance() {
|
||||||
return registry.get(
|
return this.ctx.registry.get(
|
||||||
`${__REGISTRY_NS__}:default_filesystem_instance`,
|
`${__REGISTRY_NS__}:default_filesystem_instance`,
|
||||||
() => {
|
() => {
|
||||||
return new Filesystem();
|
return new Filesystem();
|
||||||
|
|
@ -124,7 +124,7 @@ class CsiBaseDriver {
|
||||||
* @returns Mount
|
* @returns Mount
|
||||||
*/
|
*/
|
||||||
getDefaultMountInstance() {
|
getDefaultMountInstance() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:default_mount_instance`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_mount_instance`, () => {
|
||||||
const filesystem = this.getDefaultFilesystemInstance();
|
const filesystem = this.getDefaultFilesystemInstance();
|
||||||
return new Mount({ filesystem });
|
return new Mount({ filesystem });
|
||||||
});
|
});
|
||||||
|
|
@ -136,7 +136,7 @@ class CsiBaseDriver {
|
||||||
* @returns ISCSI
|
* @returns ISCSI
|
||||||
*/
|
*/
|
||||||
getDefaultISCSIInstance() {
|
getDefaultISCSIInstance() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:default_iscsi_instance`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_iscsi_instance`, () => {
|
||||||
return new ISCSI();
|
return new ISCSI();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
@ -148,13 +148,13 @@ class CsiBaseDriver {
|
||||||
*/
|
*/
|
||||||
getDefaultNVMEoFInstance() {
|
getDefaultNVMEoFInstance() {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
return registry.get(`${__REGISTRY_NS__}:default_nvmeof_instance`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_nvmeof_instance`, () => {
|
||||||
return new NVMEoF({ logger: driver.ctx.logger });
|
return new NVMEoF({ logger: driver.ctx.logger });
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
getDefaultZetabyteInstance() {
|
getDefaultZetabyteInstance() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:default_zb_instance`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_zb_instance`, () => {
|
||||||
return new Zetabyte({
|
return new Zetabyte({
|
||||||
idempotent: true,
|
idempotent: true,
|
||||||
paths: {
|
paths: {
|
||||||
|
|
@ -176,17 +176,29 @@ class CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
getDefaultOneClientInstance() {
|
getDefaultOneClientInstance() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:default_oneclient_instance`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_oneclient_instance`, () => {
|
||||||
return new OneClient();
|
return new OneClient();
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
getDefaultObjectiveFSInstance() {
|
||||||
|
const driver = this;
|
||||||
|
return this.ctx.registry.get(
|
||||||
|
`${__REGISTRY_NS__}:default_objectivefs_instance`,
|
||||||
|
() => {
|
||||||
|
return new ObjectiveFS({
|
||||||
|
pool: _.get(driver.options, "objectivefs.pool"),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
*
|
*
|
||||||
* @returns CsiProxyClient
|
* @returns CsiProxyClient
|
||||||
*/
|
*/
|
||||||
getDefaultCsiProxyClientInstance() {
|
getDefaultCsiProxyClientInstance() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:default_csi_proxy_instance`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:default_csi_proxy_instance`, () => {
|
||||||
const options = {};
|
const options = {};
|
||||||
options.services = _.get(this.options, "node.csiProxy.services", {});
|
options.services = _.get(this.options, "node.csiProxy.services", {});
|
||||||
return new CsiProxyClient(options);
|
return new CsiProxyClient(options);
|
||||||
|
|
@ -194,7 +206,7 @@ class CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
getDefaultKubernetsConfigInstance() {
|
getDefaultKubernetsConfigInstance() {
|
||||||
return registry.get(
|
return this.ctx.registry.get(
|
||||||
`${__REGISTRY_NS__}:default_kubernetes_config_instance`,
|
`${__REGISTRY_NS__}:default_kubernetes_config_instance`,
|
||||||
() => {
|
() => {
|
||||||
const kc = new k8s.KubeConfig();
|
const kc = new k8s.KubeConfig();
|
||||||
|
|
@ -366,26 +378,123 @@ class CsiBaseDriver {
|
||||||
* the value of `volume_id` to play nicely with scenarios that do not support
|
* the value of `volume_id` to play nicely with scenarios that do not support
|
||||||
* long names (ie: smb share, etc)
|
* long names (ie: smb share, etc)
|
||||||
*
|
*
|
||||||
* @param {*} name
|
* per csi, strings have a max size of 128 bytes, volume_id should NOT
|
||||||
|
* execeed this limit
|
||||||
|
*
|
||||||
|
* Any Unicode string that conforms to the length limit is allowed
|
||||||
|
* except those containing the following banned characters:
|
||||||
|
* U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
|
||||||
|
* (These are control characters other than commonly used whitespace.)
|
||||||
|
*
|
||||||
|
* https://github.com/container-storage-interface/spec/blob/master/spec.md#size-limits
|
||||||
|
* https://docs.oracle.com/cd/E26505_01/html/E37384/gbcpt.html
|
||||||
|
*
|
||||||
|
* @param {*} call
|
||||||
* @returns
|
* @returns
|
||||||
*/
|
*/
|
||||||
async getVolumeIdFromName(name) {
|
async getVolumeIdFromCall(call) {
|
||||||
const driver = this;
|
const driver = this;
|
||||||
const strategy = _.get(
|
let volume_id = call.request.name;
|
||||||
|
|
||||||
|
if (!volume_id) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`volume name is required`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
const idTemplate = _.get(
|
||||||
|
driver.options,
|
||||||
|
"_private.csi.volume.idTemplate",
|
||||||
|
""
|
||||||
|
);
|
||||||
|
if (idTemplate) {
|
||||||
|
volume_id = Handlebars.compile(idTemplate)({
|
||||||
|
name: call.request.name,
|
||||||
|
parameters: call.request.parameters,
|
||||||
|
});
|
||||||
|
|
||||||
|
if (!volume_id) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`generated volume_id is empty, idTemplate may be invalid`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const hash_strategy = _.get(
|
||||||
driver.options,
|
driver.options,
|
||||||
"_private.csi.volume.idHash.strategy",
|
"_private.csi.volume.idHash.strategy",
|
||||||
""
|
""
|
||||||
);
|
);
|
||||||
switch (strategy.toLowerCase()) {
|
|
||||||
case "md5":
|
if (hash_strategy) {
|
||||||
return GeneralUtils.md5(name);
|
switch (hash_strategy.toLowerCase()) {
|
||||||
case "crc32":
|
case "md5":
|
||||||
return GeneralUtils.crc32(name);
|
volume_id = GeneralUtils.md5(volume_id);
|
||||||
case "crc16":
|
break;
|
||||||
return GeneralUtils.crc16(name);
|
case "crc8":
|
||||||
default:
|
volume_id = GeneralUtils.crc8(volume_id);
|
||||||
return name;
|
break;
|
||||||
|
case "crc16":
|
||||||
|
volume_id = GeneralUtils.crc16(volume_id);
|
||||||
|
break;
|
||||||
|
case "crc32":
|
||||||
|
volume_id = GeneralUtils.crc32(volume_id);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`unkown hash strategy: ${hash_strategy}`
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
volume_id = String(volume_id);
|
||||||
|
|
||||||
|
if (volume_id.length > 128) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`generated volume_id '${volume_id}' is too large`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (volume_id.length < 1) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`generated volume_id '${volume_id}' is too small`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* technically zfs allows `:` and `.` in addition to `_` and `-`
|
||||||
|
* TODO: make this more specific to each driver
|
||||||
|
* in particular Nomad per-alloc feature uses names with <name>-[<index>] syntax so square brackets are present
|
||||||
|
* TODO: allow for replacing chars vs absolute failure?
|
||||||
|
*/
|
||||||
|
let invalid_chars;
|
||||||
|
invalid_chars = volume_id.match(/[^a-z0-9_\-]/gi);
|
||||||
|
if (invalid_chars) {
|
||||||
|
invalid_chars = String.prototype.concat(
|
||||||
|
...new Set(invalid_chars.join(""))
|
||||||
|
);
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`generated volume_id '${volume_id}' contains invalid characters: '${invalid_chars}'`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Dataset names must begin with an alphanumeric character.
|
||||||
|
*/
|
||||||
|
if (!/^[a-z0-9]/gi.test(volume_id)) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.INVALID_ARGUMENT,
|
||||||
|
`generated volume_id '${volume_id}' must begin with alphanumeric character`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return volume_id;
|
||||||
}
|
}
|
||||||
|
|
||||||
async GetPluginInfo(call) {
|
async GetPluginInfo(call) {
|
||||||
|
|
@ -634,6 +743,7 @@ class CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
switch (node_attach_driver) {
|
switch (node_attach_driver) {
|
||||||
|
case "objectivefs":
|
||||||
case "oneclient":
|
case "oneclient":
|
||||||
// move along
|
// move along
|
||||||
break;
|
break;
|
||||||
|
|
@ -707,10 +817,11 @@ class CsiBaseDriver {
|
||||||
if (!has_guest) {
|
if (!has_guest) {
|
||||||
mount_flags.push("guest");
|
mount_flags.push("guest");
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if (volume_mount_group) {
|
// handle node service VOLUME_MOUNT_GROUP
|
||||||
mount_flags.push(`gid=${volume_mount_group}`);
|
if (volume_mount_group) {
|
||||||
}
|
mount_flags.push(`gid=${volume_mount_group}`);
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
case "iscsi":
|
case "iscsi":
|
||||||
|
|
@ -803,12 +914,15 @@ class CsiBaseDriver {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const sessionParsedPortal = iscsi.parsePortal(session.portal);
|
||||||
|
|
||||||
// rescan in scenarios when login previously occurred but volumes never appeared
|
// rescan in scenarios when login previously occurred but volumes never appeared
|
||||||
await iscsi.iscsiadm.rescanSession(session);
|
await iscsi.iscsiadm.rescanSession(session);
|
||||||
|
|
||||||
// find device name
|
// find device name
|
||||||
device = iscsi.devicePathByPortalIQNLUN(
|
device = await iscsi.devicePathByPortalIQNLUN(
|
||||||
iscsiConnection.portal,
|
//iscsiConnection.portal,
|
||||||
|
`${sessionParsedPortal.host}:${sessionParsedPortal.port}`,
|
||||||
iscsiConnection.iqn,
|
iscsiConnection.iqn,
|
||||||
iscsiConnection.lun
|
iscsiConnection.lun
|
||||||
);
|
);
|
||||||
|
|
@ -1152,6 +1266,79 @@ class CsiBaseDriver {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
break;
|
||||||
|
case "objectivefs":
|
||||||
|
let objectivefs = driver.getDefaultObjectiveFSInstance();
|
||||||
|
let ofs_filesystem = volume_context.filesystem;
|
||||||
|
let env = {};
|
||||||
|
|
||||||
|
for (const key in normalizedSecrets) {
|
||||||
|
if (key.startsWith("env.")) {
|
||||||
|
env[key.substr("env.".length)] = normalizedSecrets[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for (const key in volume_context) {
|
||||||
|
if (key.startsWith("env.")) {
|
||||||
|
env[key.substr("env.".length)] = volume_context[key];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!ofs_filesystem) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`missing ofs volume filesystem`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
let ofs_object_store = env["OBJECTSTORE"];
|
||||||
|
if (!ofs_object_store) {
|
||||||
|
ofs_object_store = await objectivefs.getObjectStoreFromFilesystem(
|
||||||
|
ofs_filesystem
|
||||||
|
);
|
||||||
|
if (ofs_object_store) {
|
||||||
|
env["OBJECTSTORE"] = ofs_object_store;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!ofs_object_store) {
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.FAILED_PRECONDITION,
|
||||||
|
`missing required ofs volume env.OBJECTSTORE`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// normalize fs to not include objectstore
|
||||||
|
ofs_filesystem = await objectivefs.stripObjectStoreFromFilesystem(
|
||||||
|
ofs_filesystem
|
||||||
|
);
|
||||||
|
|
||||||
|
device = `${ofs_object_store}${ofs_filesystem}`;
|
||||||
|
result = await mount.deviceIsMountedAtPath(
|
||||||
|
device,
|
||||||
|
staging_target_path
|
||||||
|
);
|
||||||
|
|
||||||
|
if (result) {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
result = await objectivefs.mount(
|
||||||
|
env,
|
||||||
|
ofs_filesystem,
|
||||||
|
staging_target_path,
|
||||||
|
mount_flags
|
||||||
|
);
|
||||||
|
|
||||||
|
if (result) {
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
|
||||||
|
throw new GrpcError(
|
||||||
|
grpc.status.UNKNOWN,
|
||||||
|
`failed to mount objectivefs: ${device}`
|
||||||
|
);
|
||||||
|
|
||||||
break;
|
break;
|
||||||
case "oneclient":
|
case "oneclient":
|
||||||
let oneclient = driver.getDefaultOneClientInstance();
|
let oneclient = driver.getDefaultOneClientInstance();
|
||||||
|
|
@ -2838,6 +3025,7 @@ class CsiBaseDriver {
|
||||||
case "nfs":
|
case "nfs":
|
||||||
case "smb":
|
case "smb":
|
||||||
case "lustre":
|
case "lustre":
|
||||||
|
case "objectivefs":
|
||||||
case "oneclient":
|
case "oneclient":
|
||||||
case "hostpath":
|
case "hostpath":
|
||||||
case "iscsi":
|
case "iscsi":
|
||||||
|
|
|
||||||
|
|
@ -121,6 +121,10 @@ class NodeManualDriver extends CsiBaseDriver {
|
||||||
driverResourceType = "filesystem";
|
driverResourceType = "filesystem";
|
||||||
fs_types = ["lustre"];
|
fs_types = ["lustre"];
|
||||||
break;
|
break;
|
||||||
|
case "objectivefs":
|
||||||
|
driverResourceType = "filesystem";
|
||||||
|
fs_types = ["objectivefs", "fuse.objectivefs"];
|
||||||
|
break;
|
||||||
case "oneclient":
|
case "oneclient":
|
||||||
driverResourceType = "filesystem";
|
driverResourceType = "filesystem";
|
||||||
fs_types = ["oneclient", "fuse.oneclient"];
|
fs_types = ["oneclient", "fuse.oneclient"];
|
||||||
|
|
|
||||||
|
|
@ -2,7 +2,6 @@ const fs = require("fs");
|
||||||
const { CsiBaseDriver } = require("../index");
|
const { CsiBaseDriver } = require("../index");
|
||||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||||
const { Filesystem } = require("../../utils/filesystem");
|
const { Filesystem } = require("../../utils/filesystem");
|
||||||
const registry = require("../../utils/registry");
|
|
||||||
const semver = require("semver");
|
const semver = require("semver");
|
||||||
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
const SshClient = require("../../utils/zfs_ssh_exec_client").SshClient;
|
||||||
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
||||||
|
|
@ -125,7 +124,7 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
getSshClient() {
|
getSshClient() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:ssh_client`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:ssh_client`, () => {
|
||||||
return new SshClient({
|
return new SshClient({
|
||||||
logger: this.ctx.logger,
|
logger: this.ctx.logger,
|
||||||
connection: this.options.sshConnection,
|
connection: this.options.sshConnection,
|
||||||
|
|
@ -134,7 +133,7 @@ class ZfsLocalEphemeralInlineDriver extends CsiBaseDriver {
|
||||||
}
|
}
|
||||||
|
|
||||||
getZetabyte() {
|
getZetabyte() {
|
||||||
return registry.get(`${__REGISTRY_NS__}:zb`, () => {
|
return this.ctx.registry.get(`${__REGISTRY_NS__}:zb`, () => {
|
||||||
let sshClient;
|
let sshClient;
|
||||||
let executor;
|
let executor;
|
||||||
if (this.options.sshConnection) {
|
if (this.options.sshConnection) {
|
||||||
|
|
|
||||||
|
|
@ -2,6 +2,32 @@ const _ = require("lodash");
|
||||||
const axios = require("axios");
|
const axios = require("axios");
|
||||||
const crypto = require("crypto");
|
const crypto = require("crypto");
|
||||||
const dns = require("dns");
|
const dns = require("dns");
|
||||||
|
const crc = require("crc");
|
||||||
|
|
||||||
|
axios.interceptors.request.use(
|
||||||
|
function (config) {
|
||||||
|
config.metadata = { startTime: new Date() };
|
||||||
|
return config;
|
||||||
|
},
|
||||||
|
function (error) {
|
||||||
|
return Promise.reject(error);
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
axios.interceptors.response.use(
|
||||||
|
function (response) {
|
||||||
|
response.config.metadata.endTime = new Date();
|
||||||
|
response.duration =
|
||||||
|
response.config.metadata.endTime - response.config.metadata.startTime;
|
||||||
|
return response;
|
||||||
|
},
|
||||||
|
function (error) {
|
||||||
|
error.config.metadata.endTime = new Date();
|
||||||
|
error.duration =
|
||||||
|
error.config.metadata.endTime - error.config.metadata.startTime;
|
||||||
|
return Promise.reject(error);
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
function sleep(ms) {
|
function sleep(ms) {
|
||||||
return new Promise((resolve) => {
|
return new Promise((resolve) => {
|
||||||
|
|
@ -24,58 +50,16 @@ function md5(val) {
|
||||||
return crypto.createHash("md5").update(val).digest("hex");
|
return crypto.createHash("md5").update(val).digest("hex");
|
||||||
}
|
}
|
||||||
|
|
||||||
function crc32(val) {
|
function crc8(data) {
|
||||||
for (var a, o = [], c = 0; c < 256; c++) {
|
return crc.crc8(data);
|
||||||
a = c;
|
|
||||||
for (var f = 0; f < 8; f++) a = 1 & a ? 3988292384 ^ (a >>> 1) : a >>> 1;
|
|
||||||
o[c] = a;
|
|
||||||
}
|
|
||||||
for (var n = -1, t = 0; t < val.length; t++)
|
|
||||||
n = (n >>> 8) ^ o[255 & (n ^ val.charCodeAt(t))];
|
|
||||||
return (-1 ^ n) >>> 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const crctab16 = new Uint16Array([
|
|
||||||
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, 0x8c48,
|
|
||||||
0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, 0x1081, 0x0108,
|
|
||||||
0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, 0x9cc9, 0x8d40, 0xbfdb,
|
|
||||||
0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, 0x2102, 0x308b, 0x0210, 0x1399,
|
|
||||||
0x6726, 0x76af, 0x4434, 0x55bd, 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e,
|
|
||||||
0xfae7, 0xc87c, 0xd9f5, 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e,
|
|
||||||
0x54b5, 0x453c, 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd,
|
|
||||||
0xc974, 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
|
|
||||||
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, 0x5285,
|
|
||||||
0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, 0xdecd, 0xcf44,
|
|
||||||
0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, 0x6306, 0x728f, 0x4014,
|
|
||||||
0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, 0xef4e, 0xfec7, 0xcc5c, 0xddd5,
|
|
||||||
0xa96a, 0xb8e3, 0x8a78, 0x9bf1, 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3,
|
|
||||||
0x242a, 0x16b1, 0x0738, 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862,
|
|
||||||
0x9af9, 0x8b70, 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e,
|
|
||||||
0xf0b7, 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
|
|
||||||
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, 0x18c1,
|
|
||||||
0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, 0xa50a, 0xb483,
|
|
||||||
0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, 0x2942, 0x38cb, 0x0a50,
|
|
||||||
0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, 0xb58b, 0xa402, 0x9699, 0x8710,
|
|
||||||
0xf3af, 0xe226, 0xd0bd, 0xc134, 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7,
|
|
||||||
0x6e6e, 0x5cf5, 0x4d7c, 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1,
|
|
||||||
0xa33a, 0xb2b3, 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72,
|
|
||||||
0x3efb, 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
|
|
||||||
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, 0xe70e,
|
|
||||||
0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, 0x6b46, 0x7acf,
|
|
||||||
0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, 0xf78f, 0xe606, 0xd49d,
|
|
||||||
0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, 0x7bc7, 0x6a4e, 0x58d5, 0x495c,
|
|
||||||
0x3de3, 0x2c6a, 0x1ef1, 0x0f78,
|
|
||||||
]);
|
|
||||||
|
|
||||||
// calculate the 16-bit CRC of data with predetermined length.
|
|
||||||
function crc16(data) {
|
function crc16(data) {
|
||||||
var res = 0x0ffff;
|
return crc.crc16(data);
|
||||||
|
}
|
||||||
|
|
||||||
for (let b of data) {
|
function crc32(data) {
|
||||||
res = ((res >> 8) & 0x0ff) ^ crctab16[(res ^ b) & 0xff];
|
return crc.crc32(data);
|
||||||
}
|
|
||||||
|
|
||||||
return ~res & 0x0ffff;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
function lockKeysFromRequest(call, serviceMethodName) {
|
function lockKeysFromRequest(call, serviceMethodName) {
|
||||||
|
|
@ -197,6 +181,20 @@ function stringify(value) {
|
||||||
return JSON.stringify(value, getCircularReplacer());
|
return JSON.stringify(value, getCircularReplacer());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
function before_string(target, search) {
|
||||||
|
if (!target.includes(search)) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
return target.substring(0, target.indexOf(search));
|
||||||
|
}
|
||||||
|
|
||||||
|
function after_string(target, search) {
|
||||||
|
if (!target.includes(search)) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
return target.substring(target.indexOf(search) + search.length);
|
||||||
|
}
|
||||||
|
|
||||||
function default_supported_block_filesystems() {
|
function default_supported_block_filesystems() {
|
||||||
return ["btrfs", "exfat", "ext3", "ext4", "ext4dev", "ntfs", "vfat", "xfs"];
|
return ["btrfs", "exfat", "ext3", "ext4", "ext4dev", "ntfs", "vfat", "xfs"];
|
||||||
}
|
}
|
||||||
|
|
@ -278,9 +276,12 @@ module.exports.sleep = sleep;
|
||||||
module.exports.md5 = md5;
|
module.exports.md5 = md5;
|
||||||
module.exports.crc32 = crc32;
|
module.exports.crc32 = crc32;
|
||||||
module.exports.crc16 = crc16;
|
module.exports.crc16 = crc16;
|
||||||
|
module.exports.crc8 = crc8;
|
||||||
module.exports.lockKeysFromRequest = lockKeysFromRequest;
|
module.exports.lockKeysFromRequest = lockKeysFromRequest;
|
||||||
module.exports.getLargestNumber = getLargestNumber;
|
module.exports.getLargestNumber = getLargestNumber;
|
||||||
module.exports.stringify = stringify;
|
module.exports.stringify = stringify;
|
||||||
|
module.exports.before_string = before_string;
|
||||||
|
module.exports.after_string = after_string;
|
||||||
module.exports.stripWindowsDriveLetter = stripWindowsDriveLetter;
|
module.exports.stripWindowsDriveLetter = stripWindowsDriveLetter;
|
||||||
module.exports.hasWindowsDriveLetter = hasWindowsDriveLetter;
|
module.exports.hasWindowsDriveLetter = hasWindowsDriveLetter;
|
||||||
module.exports.axios_request = axios_request;
|
module.exports.axios_request = axios_request;
|
||||||
|
|
|
||||||
|
|
@ -1,5 +1,6 @@
|
||||||
const cp = require("child_process");
|
const cp = require("child_process");
|
||||||
const { sleep } = require("./general");
|
const { hostname_lookup, sleep } = require("./general");
|
||||||
|
const net = require("net");
|
||||||
|
|
||||||
function getIscsiValue(value) {
|
function getIscsiValue(value) {
|
||||||
if (value == "<empty>") return null;
|
if (value == "<empty>") return null;
|
||||||
|
|
@ -179,12 +180,43 @@ class ISCSI {
|
||||||
const sessions = await iscsi.iscsiadm.getSessions();
|
const sessions = await iscsi.iscsiadm.getSessions();
|
||||||
|
|
||||||
let parsedPortal = iscsi.parsePortal(portal);
|
let parsedPortal = iscsi.parsePortal(portal);
|
||||||
|
let parsedPortalHostIP = "";
|
||||||
|
if (parsedPortal.host) {
|
||||||
|
// if host is not an ip address
|
||||||
|
let parsedPortalHost = parsedPortal.host
|
||||||
|
.replaceAll("[", "")
|
||||||
|
.replaceAll("]", "");
|
||||||
|
if (net.isIP(parsedPortalHost) == 0) {
|
||||||
|
// ipv6 response is without []
|
||||||
|
try {
|
||||||
|
parsedPortalHostIP =
|
||||||
|
(await hostname_lookup(parsedPortal.host)) || "";
|
||||||
|
} catch (err) {
|
||||||
|
console.log(
|
||||||
|
`failed to lookup hostname: host - ${parsedPortal.host}, error - ${err}`
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// set invalid hostname/ip string to ensure empty values do not errantly pass
|
||||||
|
if (!parsedPortalHostIP) {
|
||||||
|
parsedPortalHostIP = "--------------------------------------";
|
||||||
|
}
|
||||||
let session = false;
|
let session = false;
|
||||||
sessions.every((i_session) => {
|
sessions.every((i_session) => {
|
||||||
|
// [2a10:4741:36:28:e61d:2dff:fe90:80fe]:3260
|
||||||
|
// i_session.portal includes [] for ipv6
|
||||||
if (
|
if (
|
||||||
`${i_session.iqn}` == tgtIQN &&
|
`${i_session.iqn}` == tgtIQN &&
|
||||||
(portal == i_session.portal ||
|
(portal == i_session.portal ||
|
||||||
`[${parsedPortal.host}]:${parsedPortal.port}` == i_session.portal)
|
`${parsedPortal.host}:${parsedPortal.port}` == i_session.portal ||
|
||||||
|
`${parsedPortalHostIP}:${parsedPortal.port}` ==
|
||||||
|
i_session.portal ||
|
||||||
|
`[${parsedPortal.host}]:${parsedPortal.port}` ==
|
||||||
|
i_session.portal ||
|
||||||
|
`[${parsedPortalHostIP}]:${parsedPortal.port}` ==
|
||||||
|
i_session.portal)
|
||||||
) {
|
) {
|
||||||
session = i_session;
|
session = i_session;
|
||||||
return false;
|
return false;
|
||||||
|
|
@ -560,11 +592,12 @@ class ISCSI {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
devicePathByPortalIQNLUN(portal, iqn, lun) {
|
async devicePathByPortalIQNLUN(portal, iqn, lun, options = {}) {
|
||||||
const parsedPortal = this.parsePortal(portal);
|
const parsedPortal = this.parsePortal(portal);
|
||||||
const portalHost = parsedPortal.host
|
let portalHost = parsedPortal.host.replaceAll("[", "").replaceAll("]", "");
|
||||||
.replaceAll("[", "")
|
if (options.hostname_lookup && net.isIP(portalHost) == 0) {
|
||||||
.replaceAll("]", "");
|
portalHost = (await hostname_lookup(portalHost)) || portalHost;
|
||||||
|
}
|
||||||
return `/dev/disk/by-path/ip-${portalHost}:${parsedPortal.port}-iscsi-${iqn}-lun-${lun}`;
|
return `/dev/disk/by-path/ip-${portalHost}:${parsedPortal.port}-iscsi-${iqn}-lun-${lun}`;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,349 @@
|
||||||
|
const _ = require("lodash");
|
||||||
|
const cp = require("child_process");
|
||||||
|
const uuidv4 = require("uuid").v4;
|
||||||
|
|
||||||
|
const DEFAULT_TIMEOUT = process.env.KOPIA_DEFAULT_TIMEOUT || 90000;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* https://kopia.io/
|
||||||
|
*/
|
||||||
|
class Kopia {
|
||||||
|
constructor(options = {}) {
|
||||||
|
const kopia = this;
|
||||||
|
kopia.options = options;
|
||||||
|
kopia.client_intance_uuid = uuidv4();
|
||||||
|
|
||||||
|
options.paths = options.paths || {};
|
||||||
|
if (!options.paths.kopia) {
|
||||||
|
options.paths.kopia = "kopia";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.paths.sudo) {
|
||||||
|
options.paths.sudo = "/usr/bin/sudo";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.paths.chroot) {
|
||||||
|
options.paths.chroot = "/usr/sbin/chroot";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.env) {
|
||||||
|
options.env = {};
|
||||||
|
}
|
||||||
|
|
||||||
|
options.env[
|
||||||
|
"KOPIA_CONFIG_PATH"
|
||||||
|
] = `/tmp/kopia/${kopia.client_intance_uuid}/repository.config`;
|
||||||
|
options.env["KOPIA_CHECK_FOR_UPDATES"] = "false";
|
||||||
|
options.env[
|
||||||
|
"KOPIA_CACHE_DIRECTORY"
|
||||||
|
] = `/tmp/kopia/${kopia.client_intance_uuid}/cache`;
|
||||||
|
options.env[
|
||||||
|
"KOPIA_LOG_DIR"
|
||||||
|
] = `/tmp/kopia/${kopia.client_intance_uuid}/log`;
|
||||||
|
|
||||||
|
if (!options.executor) {
|
||||||
|
options.executor = {
|
||||||
|
spawn: cp.spawn,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.logger) {
|
||||||
|
options.logger = console;
|
||||||
|
}
|
||||||
|
|
||||||
|
options.logger.info(
|
||||||
|
`kopia client instantiated with client_instance_uuid: ${kopia.client_intance_uuid}`
|
||||||
|
);
|
||||||
|
|
||||||
|
if (!options.global_flags) {
|
||||||
|
options.global_flags = [];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia repository connect
|
||||||
|
*
|
||||||
|
* https://kopia.io/docs/reference/command-line/common/repository-connect-from-config/
|
||||||
|
*
|
||||||
|
* --override-hostname
|
||||||
|
* --override-username
|
||||||
|
*
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async repositoryConnect(options = []) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = ["repository", "connect"];
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
args = args.concat(options);
|
||||||
|
|
||||||
|
try {
|
||||||
|
await kopia.exec(kopia.options.paths.kopia, args);
|
||||||
|
return;
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia repository status
|
||||||
|
*
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async repositoryStatus(options = []) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = ["repository", "status", "--json"];
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
args = args.concat(options);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await kopia.exec(kopia.options.paths.kopia, args);
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia snapshot list
|
||||||
|
*
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async snapshotList(options = []) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = [];
|
||||||
|
args = args.concat(["snapshot", "list", "--json"]);
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
args = args.concat(options);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await kopia.exec(kopia.options.paths.kopia, args, {
|
||||||
|
operation: "snapshot-list",
|
||||||
|
});
|
||||||
|
|
||||||
|
return result.parsed;
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia snapshot list
|
||||||
|
*
|
||||||
|
* @param {*} snapshot_id
|
||||||
|
*/
|
||||||
|
async snapshotGet(snapshot_id) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = [];
|
||||||
|
args = args.concat(["snapshot", "list", "--json", "--all"]);
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await kopia.exec(kopia.options.paths.kopia, args, {
|
||||||
|
operation: "snapshot-list",
|
||||||
|
});
|
||||||
|
|
||||||
|
return result.parsed.find((item) => {
|
||||||
|
return item.id == snapshot_id;
|
||||||
|
});
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia snapshot create
|
||||||
|
*
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async snapshotCreate(options = []) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = [];
|
||||||
|
args = args.concat(["snapshot", "create", "--json"]);
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
args = args.concat(options);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await kopia.exec(kopia.options.paths.kopia, args, {
|
||||||
|
operation: "snapshot-create",
|
||||||
|
});
|
||||||
|
|
||||||
|
return result.parsed;
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia snapshot delete <id>
|
||||||
|
*
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async snapshotDelete(options = []) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = [];
|
||||||
|
args = args.concat(["snapshot", "delete", "--delete"]);
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
args = args.concat(options);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await kopia.exec(kopia.options.paths.kopia, args, {
|
||||||
|
operation: "snapshot-delete",
|
||||||
|
});
|
||||||
|
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
if (
|
||||||
|
err.code == 1 &&
|
||||||
|
(err.stderr.includes("no snapshots matched") ||
|
||||||
|
err.stderr.includes("invalid content hash"))
|
||||||
|
) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* kopia snapshot restore <snapshot_id[/sub/path]> /path/to/restore/to
|
||||||
|
*
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async snapshotRestore(options = []) {
|
||||||
|
const kopia = this;
|
||||||
|
let args = [];
|
||||||
|
args = args.concat(["snapshot", "restore"]);
|
||||||
|
args = args.concat(kopia.options.global_flags);
|
||||||
|
args = args.concat(options);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await kopia.exec(kopia.options.paths.kopia, args, {
|
||||||
|
operation: "snapshot-restore",
|
||||||
|
});
|
||||||
|
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
if (
|
||||||
|
err.code == 1 &&
|
||||||
|
(err.stderr.includes("no snapshots matched") ||
|
||||||
|
err.stderr.includes("invalid content hash"))
|
||||||
|
) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
exec(command, args, options = {}) {
|
||||||
|
if (!options.hasOwnProperty("timeout")) {
|
||||||
|
options.timeout = DEFAULT_TIMEOUT;
|
||||||
|
}
|
||||||
|
|
||||||
|
const kopia = this;
|
||||||
|
args = args || [];
|
||||||
|
|
||||||
|
if (kopia.options.sudo) {
|
||||||
|
args.unshift(command);
|
||||||
|
command = kopia.options.paths.sudo;
|
||||||
|
}
|
||||||
|
|
||||||
|
options.env = {
|
||||||
|
...{},
|
||||||
|
...process.env,
|
||||||
|
...kopia.options.env,
|
||||||
|
...options.env,
|
||||||
|
};
|
||||||
|
|
||||||
|
let tokenIndex = args.findIndex((value) => {
|
||||||
|
return value.trim() == "--token";
|
||||||
|
});
|
||||||
|
let cleansedArgs = [...args];
|
||||||
|
if (tokenIndex >= 0) {
|
||||||
|
cleansedArgs[tokenIndex + 1] = "redacted";
|
||||||
|
}
|
||||||
|
|
||||||
|
const cleansedLog = `${command} ${cleansedArgs.join(" ")}`;
|
||||||
|
console.log("executing kopia command: %s", cleansedLog);
|
||||||
|
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
let stdin;
|
||||||
|
if (options.stdin) {
|
||||||
|
stdin = options.stdin;
|
||||||
|
delete options.stdin;
|
||||||
|
}
|
||||||
|
const child = kopia.options.executor.spawn(command, args, options);
|
||||||
|
if (stdin) {
|
||||||
|
child.stdin.write(stdin);
|
||||||
|
}
|
||||||
|
|
||||||
|
let stdout = "";
|
||||||
|
let stderr = "";
|
||||||
|
|
||||||
|
const log_progress_output = _.debounce(
|
||||||
|
(data) => {
|
||||||
|
const lines = data.split("\n");
|
||||||
|
/**
|
||||||
|
* get last line, remove spinner, etc
|
||||||
|
*/
|
||||||
|
const line = lines
|
||||||
|
.slice(-1)[0]
|
||||||
|
.trim()
|
||||||
|
.replace(/^[\/\\\-\|] /gi, "");
|
||||||
|
kopia.options.logger.info(
|
||||||
|
`kopia ${options.operation} progress: ${line.trim()}`
|
||||||
|
);
|
||||||
|
},
|
||||||
|
250,
|
||||||
|
{ leading: true, trailing: true, maxWait: 5000 }
|
||||||
|
);
|
||||||
|
|
||||||
|
child.stdout.on("data", function (data) {
|
||||||
|
data = String(data);
|
||||||
|
stdout += data;
|
||||||
|
});
|
||||||
|
|
||||||
|
child.stderr.on("data", function (data) {
|
||||||
|
data = String(data);
|
||||||
|
stderr += data;
|
||||||
|
switch (options.operation) {
|
||||||
|
case "snapshot-create":
|
||||||
|
log_progress_output(data);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on("close", function (code) {
|
||||||
|
const result = { code, stdout, stderr, timeout: false };
|
||||||
|
|
||||||
|
if (!result.parsed) {
|
||||||
|
try {
|
||||||
|
result.parsed = JSON.parse(result.stdout);
|
||||||
|
} catch (err) {}
|
||||||
|
}
|
||||||
|
|
||||||
|
// timeout scenario
|
||||||
|
if (code === null) {
|
||||||
|
result.timeout = true;
|
||||||
|
reject(result);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (code) {
|
||||||
|
reject(result);
|
||||||
|
} else {
|
||||||
|
resolve(result);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports.Kopia = Kopia;
|
||||||
|
|
@ -29,6 +29,9 @@ class NVMEoF {
|
||||||
nvmeof.logger = nvmeof.options.logger;
|
nvmeof.logger = nvmeof.options.logger;
|
||||||
} else {
|
} else {
|
||||||
nvmeof.logger = console;
|
nvmeof.logger = console;
|
||||||
|
console.verbose = function() {
|
||||||
|
console.log(...arguments);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -121,7 +124,7 @@ class NVMEoF {
|
||||||
} catch (err) {
|
} catch (err) {
|
||||||
if (
|
if (
|
||||||
err.stderr &&
|
err.stderr &&
|
||||||
(err.stderr.includes("already connnected") ||
|
(err.stderr.includes("already connected") ||
|
||||||
err.stderr.includes("Operation already in progress"))
|
err.stderr.includes("Operation already in progress"))
|
||||||
) {
|
) {
|
||||||
// idempotent
|
// idempotent
|
||||||
|
|
@ -273,11 +276,33 @@ class NVMEoF {
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async pathExists(path) {
|
||||||
|
const nvmeof = this;
|
||||||
|
try {
|
||||||
|
await nvmeof.exec("stat", [
|
||||||
|
path,
|
||||||
|
]);
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
async nativeMultipathEnabled() {
|
async nativeMultipathEnabled() {
|
||||||
const nvmeof = this;
|
const nvmeof = this;
|
||||||
let result = await nvmeof.exec("cat", [
|
let result;
|
||||||
"/sys/module/nvme_core/parameters/multipath",
|
|
||||||
]);
|
try {
|
||||||
|
result = await nvmeof.exec("cat", [
|
||||||
|
"/sys/module/nvme_core/parameters/multipath",
|
||||||
|
]);
|
||||||
|
} catch (err) {
|
||||||
|
if (err.code == 1 && err.stderr.includes("No such file or directory")) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
|
||||||
return result.stdout.trim() == "Y";
|
return result.stdout.trim() == "Y";
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,369 @@
|
||||||
|
const cp = require("child_process");
|
||||||
|
const GeneralUtils = require("./general");
|
||||||
|
|
||||||
|
const DEFAULT_TIMEOUT = process.env.MOUNT_DEFAULT_TIMEOUT || 30000;
|
||||||
|
|
||||||
|
const EXIT_CODES = {
|
||||||
|
64: "administrator can not mount filesystems",
|
||||||
|
65: "unable to decrypt using passphrase",
|
||||||
|
78: "missing or invalid passphrase",
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* https://objectivefs.com/
|
||||||
|
*/
|
||||||
|
class ObjectiveFS {
|
||||||
|
constructor(options = {}) {
|
||||||
|
const objectivefs = this;
|
||||||
|
objectivefs.options = options;
|
||||||
|
|
||||||
|
options.paths = options.paths || {};
|
||||||
|
if (!options.paths.objectivefs) {
|
||||||
|
options.paths.objectivefs = "mount.objectivefs";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.paths.sudo) {
|
||||||
|
options.paths.sudo = "/usr/bin/sudo";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.paths.chroot) {
|
||||||
|
options.paths.chroot = "/usr/sbin/chroot";
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.env) {
|
||||||
|
options.env = {};
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!options.executor) {
|
||||||
|
options.executor = {
|
||||||
|
spawn: cp.spawn,
|
||||||
|
//spawn: cp.execFile,
|
||||||
|
};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* mount.objectivefs [-o <opt>[,<opt>]..] <filesystem> <dir>
|
||||||
|
*
|
||||||
|
* @param {*} env
|
||||||
|
* @param {*} filesystem
|
||||||
|
* @param {*} target
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async mount(env, filesystem, target, options = []) {
|
||||||
|
if (!env) {
|
||||||
|
env = {};
|
||||||
|
}
|
||||||
|
const objectivefs = this;
|
||||||
|
let args = [];
|
||||||
|
if (options.length > 0) {
|
||||||
|
// TODO: maybe do -o <opt> -o <opt>?
|
||||||
|
args = args.concat(["-o", options.join(",")]);
|
||||||
|
}
|
||||||
|
args = args.concat([filesystem, target]);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await objectivefs.exec(
|
||||||
|
objectivefs.options.paths.objectivefs,
|
||||||
|
args,
|
||||||
|
{ env, operation: "mount" }
|
||||||
|
);
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* mount.objectivefs create <your filesystem name>
|
||||||
|
* mount.objectivefs create -f <bucket>/<fs>
|
||||||
|
*
|
||||||
|
* @param {*} env
|
||||||
|
* @param {*} filesystem
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async create(env, filesystem, options = []) {
|
||||||
|
if (!env) {
|
||||||
|
env = {};
|
||||||
|
}
|
||||||
|
const objectivefs = this;
|
||||||
|
let args = ["create"];
|
||||||
|
args = args.concat(options);
|
||||||
|
args = args.concat([filesystem]);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await objectivefs.exec(
|
||||||
|
objectivefs.options.paths.objectivefs,
|
||||||
|
args,
|
||||||
|
{ env }
|
||||||
|
);
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
if (err.code == 1 && err.stderr.includes("filesystem already exists")) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* echo 'y' | mount.objectivefs destroy <bucket>/<fs>
|
||||||
|
*
|
||||||
|
* @param {*} env
|
||||||
|
* @param {*} filesystem
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async destroy(env, filesystem, options = []) {
|
||||||
|
const objectivefs = this;
|
||||||
|
if (!env) {
|
||||||
|
env = {};
|
||||||
|
}
|
||||||
|
|
||||||
|
filesystem = await objectivefs.stripObjectStoreFromFilesystem(filesystem);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* delete safety checks for filesystem
|
||||||
|
*
|
||||||
|
* while it is possible to delete a fs without a pool we
|
||||||
|
* should never be doing that in democratic-csi
|
||||||
|
*/
|
||||||
|
let fs_parts = filesystem.split("/");
|
||||||
|
if (fs_parts.length != 2) {
|
||||||
|
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!fs_parts[0]) {
|
||||||
|
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
let pool = objectivefs.options.pool;
|
||||||
|
pool = await objectivefs.stripObjectStoreFromFilesystem(pool);
|
||||||
|
if (!pool) {
|
||||||
|
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (fs_parts[0].trim() != pool.trim()) {
|
||||||
|
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!fs_parts[1]) {
|
||||||
|
throw new Error(`filesystem safety check failed for fs: ${filesystem}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
let args = ["destroy"];
|
||||||
|
args = args.concat(options);
|
||||||
|
args = args.concat([filesystem]);
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await objectivefs.exec(
|
||||||
|
"/bin/bash",
|
||||||
|
[
|
||||||
|
"-c",
|
||||||
|
`echo y | ${objectivefs.options.paths.objectivefs} ${args.join(" ")}`,
|
||||||
|
],
|
||||||
|
{ env }
|
||||||
|
);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
if (
|
||||||
|
err.code == 68 &&
|
||||||
|
err.stdout.includes("does not look like an ObjectiveFS filesystem")
|
||||||
|
) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
parseListOutput(data) {
|
||||||
|
const lines = data.split("\n");
|
||||||
|
let headers = [];
|
||||||
|
let entries = [];
|
||||||
|
lines.forEach((line, i) => {
|
||||||
|
if (line.length < 1) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const parts = line.split("\t");
|
||||||
|
if (i == 0) {
|
||||||
|
headers = parts.map((header) => {
|
||||||
|
return header.trim();
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let entry = {};
|
||||||
|
headers.forEach((name, index) => {
|
||||||
|
entry[name.trim()] = parts[index].trim();
|
||||||
|
});
|
||||||
|
|
||||||
|
entries.push(entry);
|
||||||
|
});
|
||||||
|
|
||||||
|
return entries;
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* mount.objectivefs list [-asvz] [<filesystem>[@<time>]]
|
||||||
|
*
|
||||||
|
* @param {*} env
|
||||||
|
* @param {*} filesystem
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async list(env, filesystem = null, options = []) {
|
||||||
|
if (!env) {
|
||||||
|
env = {};
|
||||||
|
}
|
||||||
|
const objectivefs = this;
|
||||||
|
let args = ["list"];
|
||||||
|
args = args.concat(options);
|
||||||
|
if (filesystem) {
|
||||||
|
args = args.concat([filesystem]);
|
||||||
|
}
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
result = await objectivefs.exec(
|
||||||
|
objectivefs.options.paths.objectivefs,
|
||||||
|
args,
|
||||||
|
{ env }
|
||||||
|
);
|
||||||
|
|
||||||
|
return objectivefs.parseListOutput(result.stdout);
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* mount.objectivefs snapshot <filesystem>
|
||||||
|
*
|
||||||
|
* NOTE: fs must be mount on node to function
|
||||||
|
*
|
||||||
|
* @param {*} env
|
||||||
|
* @param {*} filesystem
|
||||||
|
* @param {*} options
|
||||||
|
*/
|
||||||
|
async snapshot(env, filesystem = null, options = []) {
|
||||||
|
if (!env) {
|
||||||
|
env = {};
|
||||||
|
}
|
||||||
|
const objectivefs = this;
|
||||||
|
let args = ["list"];
|
||||||
|
args = args.concat(options);
|
||||||
|
if (filesystem) {
|
||||||
|
args = args.concat([filesystem]);
|
||||||
|
}
|
||||||
|
|
||||||
|
let result;
|
||||||
|
try {
|
||||||
|
// NOTE: Successfully created snapshot: minio://ofs/test@2024-02-13T07:56:38Z (2024-02-13T00:56:38)
|
||||||
|
result = await objectivefs.exec(
|
||||||
|
objectivefs.options.paths.objectivefs,
|
||||||
|
args,
|
||||||
|
{ env }
|
||||||
|
);
|
||||||
|
|
||||||
|
return result;
|
||||||
|
} catch (err) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async getObjectStoreFromFilesystem(filesystem) {
|
||||||
|
if (filesystem.includes("://")) {
|
||||||
|
return GeneralUtils.before_string(filesystem, "://") + "://";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async stripObjectStoreFromFilesystem(filesystem) {
|
||||||
|
if (filesystem.includes("://")) {
|
||||||
|
return GeneralUtils.after_string(filesystem, "://");
|
||||||
|
}
|
||||||
|
return filesystem;
|
||||||
|
}
|
||||||
|
|
||||||
|
exec(command, args, options = {}) {
|
||||||
|
if (!options.hasOwnProperty("timeout")) {
|
||||||
|
options.timeout = DEFAULT_TIMEOUT;
|
||||||
|
}
|
||||||
|
|
||||||
|
const objectivefs = this;
|
||||||
|
args = args || [];
|
||||||
|
|
||||||
|
if (objectivefs.options.sudo) {
|
||||||
|
args.unshift(command);
|
||||||
|
command = objectivefs.options.paths.sudo;
|
||||||
|
}
|
||||||
|
|
||||||
|
options.env = { ...{}, ...objectivefs.options.env, ...options.env };
|
||||||
|
|
||||||
|
// truncate admin key during mount operations
|
||||||
|
if (options.operation == "mount") {
|
||||||
|
delete options.operation;
|
||||||
|
// standard license is 24
|
||||||
|
// admin key is 8
|
||||||
|
if (
|
||||||
|
options.env.OBJECTIVEFS_LICENSE &&
|
||||||
|
options.env.OBJECTIVEFS_LICENSE.length > 24
|
||||||
|
) {
|
||||||
|
options.env.OBJECTIVEFS_LICENSE =
|
||||||
|
options.env.OBJECTIVEFS_LICENSE.substr(0, 24);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
options.env.PATH = process.env.PATH;
|
||||||
|
|
||||||
|
const cleansedLog = `${command} ${args.join(" ")}`;
|
||||||
|
console.log("executing objectivefs command: %s", cleansedLog);
|
||||||
|
//console.log(options.env);
|
||||||
|
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
let stdin;
|
||||||
|
if (options.stdin) {
|
||||||
|
stdin = options.stdin;
|
||||||
|
delete options.stdin;
|
||||||
|
}
|
||||||
|
const child = objectivefs.options.executor.spawn(command, args, options);
|
||||||
|
if (stdin) {
|
||||||
|
child.stdin.write(stdin);
|
||||||
|
}
|
||||||
|
|
||||||
|
let stdout = "";
|
||||||
|
let stderr = "";
|
||||||
|
|
||||||
|
child.stdout.on("data", function (data) {
|
||||||
|
stdout = stdout + data;
|
||||||
|
});
|
||||||
|
|
||||||
|
child.stderr.on("data", function (data) {
|
||||||
|
stderr = stderr + data;
|
||||||
|
});
|
||||||
|
|
||||||
|
child.on("close", function (code) {
|
||||||
|
if (!stderr && EXIT_CODES[code]) {
|
||||||
|
stderr += EXIT_CODES[code];
|
||||||
|
}
|
||||||
|
|
||||||
|
const result = { code, stdout, stderr, timeout: false };
|
||||||
|
|
||||||
|
// timeout scenario
|
||||||
|
if (code === null) {
|
||||||
|
result.timeout = true;
|
||||||
|
reject(result);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (code) {
|
||||||
|
reject(result);
|
||||||
|
} else {
|
||||||
|
resolve(result);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports.ObjectiveFS = ObjectiveFS;
|
||||||
|
|
@ -3,3 +3,7 @@ if (typeof String.prototype.replaceAll == "undefined") {
|
||||||
return this.replace(new RegExp(match, "g"), () => replace);
|
return this.replace(new RegExp(match, "g"), () => replace);
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Array.prototype.random = function () {
|
||||||
|
return this[Math.floor(Math.random() * this.length)];
|
||||||
|
};
|
||||||
|
|
|
||||||
|
|
@ -48,6 +48,4 @@ class Registry {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const registry = new Registry();
|
module.exports.Registry = Registry;
|
||||||
|
|
||||||
module.exports = registry;
|
|
||||||
|
|
|
||||||
|
|
@ -0,0 +1,494 @@
|
||||||
|
const _ = require("lodash");
|
||||||
|
const cp = require("child_process");
|
||||||
|
|
||||||
|
const DEFAULT_TIMEOUT = process.env.RESTIC_DEFAULT_TIMEOUT || 90000;
|
||||||
|
|
||||||
|
/**
 * Thin wrapper around the restic backup tool (https://restic.net/).
 *
 * Every command is run with `--json`; stdout is parsed into `result.parsed`
 * (either a single JSON document or newline-delimited JSON).
 */
class Restic {
  /**
   * @param {object} [options]
   * @param {object} [options.paths] - binary paths (restic/sudo/chroot)
   * @param {object} [options.env] - extra environment for spawned processes
   * @param {object} [options.executor] - object with a `spawn` function (test seam)
   * @param {object} [options.logger] - logger with an `info` method
   * @param {string[]} [options.global_flags] - flags prepended to every command
   * @param {boolean} [options.sudo] - wrap every invocation in sudo
   */
  constructor(options = {}) {
    const restic = this;
    restic.options = options;

    // Defaults are deliberately written back onto the caller-supplied object
    // so later reads of `restic.options` observe them.
    options.paths = options.paths || {};
    options.paths.restic = options.paths.restic || "restic";
    options.paths.sudo = options.paths.sudo || "/usr/bin/sudo";
    options.paths.chroot = options.paths.chroot || "/usr/sbin/chroot";
    options.env = options.env || {};
    options.executor = options.executor || { spawn: cp.spawn };
    options.logger = options.logger || console;
    options.global_flags = options.global_flags || [];
  }

  /**
   * restic init
   *
   * Initializes the repository; an "already initialized" failure is
   * treated as success.
   *
   * @param {string[]} [options] - extra CLI arguments
   */
  async init(options = []) {
    const restic = this;
    const args = ["init", "--json", ...restic.options.global_flags, ...options];

    try {
      await restic.exec(restic.options.paths.restic, args);
    } catch (err) {
      // repo already exists -> not an error
      if (err.code == 1 && err.stderr.includes("already")) {
        return;
      }
      throw err;
    }
  }

  /**
   * restic unlock
   *
   * @param {string[]} [options] - extra CLI arguments
   */
  async unlock(options = []) {
    const restic = this;
    const args = [
      "unlock",
      "--json",
      ...restic.options.global_flags,
      ...options,
    ];
    await restic.exec(restic.options.paths.restic, args);
  }

  /**
   * restic backup
   *
   * @param {string} path - filesystem path to back up (appended last)
   * @param {string[]} [options] - extra CLI arguments
   * @returns {Promise<object>} full exec result (code/stdout/stderr/parsed)
   */
  async backup(path, options = []) {
    const restic = this;
    const args = [
      "backup",
      "--json",
      ...restic.options.global_flags,
      ...options,
      path,
    ];

    // backups can run arbitrarily long, so the timeout is disabled
    return restic.exec(restic.options.paths.restic, args, {
      operation: "backup",
      timeout: 0,
    });
  }

  /**
   * restic tag
   *
   * @param {string[]} [options] - extra CLI arguments
   * @returns {Promise<object>} full exec result
   */
  async tag(options = []) {
    const restic = this;
    const args = ["tag", "--json", ...restic.options.global_flags, ...options];
    return restic.exec(restic.options.paths.restic, args, {
      operation: "tag",
    });
  }

  /**
   * restic snapshots
   *
   * @param {string[]} [options] - extra CLI arguments
   * @returns {Promise<object[]>} flattened list of snapshot objects
   */
  async snapshots(options = []) {
    const restic = this;
    const args = [
      "snapshots",
      "--json",
      "--no-lock",
      ...restic.options.global_flags,
      ...options,
    ];

    const result = await restic.exec(restic.options.paths.restic, args, {
      operation: "snapshots",
    });

    // restic may emit bare snapshot objects or grouped `{ snapshots: [...] }`
    // entries; flatten both shapes into one list.
    const snapshots = [];
    for (const item of result.parsed) {
      if (item.id) {
        snapshots.push(item);
      }
      if (item.snapshots) {
        snapshots.push(...item.snapshots);
      }
    }
    return snapshots;
  }

  /**
   * Check whether a snapshot id resolves to at least one snapshot.
   *
   * @param {string} snapshot_id
   * @returns {Promise<boolean>}
   */
  async snapshot_exists(snapshot_id) {
    const restic = this;
    const snapshots = await restic.snapshots([snapshot_id]);
    return snapshots.length > 0;
  }

  /**
   * restic forget
   *
   * Forgetting an id that no longer exists is treated as a no-op.
   *
   * @param {string[]} [options] - extra CLI arguments
   * @returns {Promise<*>} parsed JSON output (or [] for missing snapshots)
   */
  async forget(options = []) {
    const restic = this;
    const args = [
      "forget",
      "--json",
      ...restic.options.global_flags,
      ...options,
    ];

    try {
      const result = await restic.exec(restic.options.paths.restic, args, {
        operation: "forget",
      });
      return result.parsed;
    } catch (err) {
      // https://github.com/restic/restic/pull/2515
      if (err.code == 1 && err.stderr.includes("no such file or directory")) {
        return [];
      }
      throw err;
    }
  }

  /**
   * restic stats
   *
   * @param {string[]} [options] - extra CLI arguments
   * @returns {Promise<*>} parsed JSON output
   */
  async stats(options = []) {
    const restic = this;
    const args = [
      "stats",
      "--json",
      "--no-lock",
      ...restic.options.global_flags,
      ...options,
    ];

    const result = await restic.exec(restic.options.paths.restic, args, {
      operation: "stats",
      timeout: 0, // can take a very long time to gather up details
    });
    return result.parsed;
  }

  /**
   * restic restore
   *
   * Note that restore does not do any delete operations
   * (ie: not like rsync --delete).
   *
   * A run that fails only because every error was of the "ignoring error"
   * variety is tolerated and the raw result is returned.
   *
   * @param {string[]} [options] - extra CLI arguments
   * @returns {Promise<*>} parsed JSON output (or the raw result on a
   *   fully-ignored failure)
   */
  async restore(options = []) {
    const restic = this;
    const args = [
      "restore",
      "--json",
      "--no-lock",
      ...restic.options.global_flags,
      ...options,
    ];

    try {
      const result = await restic.exec(restic.options.paths.restic, args, {
        operation: "restore",
        timeout: 0,
      });
      return result.parsed;
    } catch (err) {
      if (err.code == 1 && err.stderr.includes("Fatal:")) {
        const lines = err.stderr.split("\n").filter((item) => {
          return Boolean(String(item).trim());
        });
        const last_line = lines[lines.length - 1];
        const ignored_count = (err.stderr.match(/ignoring error/g) || [])
          .length;

        restic.options.logger.info(
          `restic ignored error count: ${ignored_count}`
        );
        restic.options.logger.info(`restic stderr last line: ${last_line}`);

        // if ignored count matches total count move on
        // "Fatal: There were 2484 errors"
        if (last_line.includes(String(ignored_count))) {
          return err;
        }
      }
      throw err;
    }
  }

  /**
   * Trim stdout/stderr/parsed down to at most `max_entries` trailing
   * lines/items so very large outputs do not blow up logs.
   *
   * @param {object} result - exec result (mutated in place)
   * @param {object} [options]
   * @param {number} [options.max_entries=50]
   * @returns {object} the same (mutated) result
   */
  trimResultData(result, options = {}) {
    const trim_output_limit = options.max_entries || 50;

    if (result.parsed && Array.isArray(result.parsed)) {
      result.parsed = result.parsed.slice(-trim_output_limit);
    }

    result.stderr = result.stderr
      .split("\n")
      .slice(-trim_output_limit)
      .join("\n");

    result.stdout = result.stdout
      .split("\n")
      .slice(-trim_output_limit)
      .join("\n");

    return result;
  }

  /**
   * Extract every value that follows a `--tag` flag, splitting
   * comma-separated lists into individual tags.
   *
   * @param {string[]} args
   * @returns {string[]} flat list of tag values
   */
  parseTagsFromArgs(args) {
    let tag_value_index;
    const values = args.filter((value, index) => {
      if (String(value) == "--tag") {
        tag_value_index = index + 1;
      }
      return tag_value_index == index;
    });

    return values
      .map((value) => (value.includes(",") ? value.split(",") : [value]))
      .flat();
  }

  /**
   * Spawn a process (optionally sudo-wrapped) and collect its output.
   *
   * @param {string} command - binary to run
   * @param {string[]} args - CLI arguments (mutated when sudo-wrapping)
   * @param {object} [options] - spawn options plus `operation`, `timeout`,
   *   and optional `stdin` payload
   * @returns {Promise<object>} resolves/rejects with
   *   `{ code, stdout, stderr, parsed, timeout }`
   */
  exec(command, args, options = {}) {
    if (!options.hasOwnProperty("timeout")) {
      options.timeout = DEFAULT_TIMEOUT;
    }

    const restic = this;
    args = args || [];

    if (restic.options.sudo) {
      args.unshift(command);
      command = restic.options.paths.sudo;
    }

    options.env = {
      ...process.env,
      ...restic.options.env,
      ...options.env,
    };

    const cleansedLog = `${command} ${args.join(" ")}`;
    console.log("executing restic command: %s", cleansedLog);

    return new Promise((resolve, reject) => {
      let stdin;
      if (options.stdin) {
        stdin = options.stdin;
        delete options.stdin;
      }
      const child = restic.options.executor.spawn(command, args, options);
      if (stdin) {
        child.stdin.write(stdin);
      }

      let stdout = "";
      let stderr = "";
      let code_override;

      // Progress lines arrive rapidly during backup/restore; debounce so
      // long-running operations do not flood the logger.
      const log_progress_output = _.debounce(
        (data) => {
          let snapshot_id;
          let path;
          switch (options.operation) {
            case "backup":
              snapshot_id = `unknown_creating_new_snapshot_in_progress`;
              path = args[args.length - 1];
              break;
            case "restore": {
              snapshot_id = args
                .find((value) => {
                  return String(value).includes(":");
                })
                .split(":")[0];

              let path_index;
              path = args.find((value, index) => {
                if (String(value) == "--target") {
                  path_index = index + 1;
                }
                return path_index == index;
              });
              break;
            }
            default:
              return;
          }

          if (data.message_type == "status") {
            // current_files can be huge; drop it before logging
            delete data.current_files;
            restic.options.logger.info(
              `restic ${options.operation} progress: snapshot_id=${snapshot_id}, path=${path}`,
              data
            );
          }

          if (data.message_type == "summary") {
            restic.options.logger.info(
              `restic ${options.operation} summary: snapshot_id=${snapshot_id}, path=${path}`,
              data
            );
          }
        },
        250,
        { leading: true, trailing: true, maxWait: 5000 }
      );

      child.stdout.on("data", function (data) {
        data = String(data);
        stdout += data;
        switch (options.operation) {
          case "backup":
          case "restore":
            // best-effort: a chunk may not be a complete JSON document
            try {
              log_progress_output(JSON.parse(data));
            } catch (err) {}
            break;
        }
      });

      child.stderr.on("data", function (data) {
        data = String(data);
        stderr += data;
        if (
          ["forget", "snapshots"].includes(options.operation) &&
          stderr.includes("no such file or directory")
        ) {
          // short-circuit the operation vs waiting for all the retries
          // https://github.com/restic/restic/pull/2515
          switch (options.operation) {
            case "forget":
              code_override = 1;
              break;
            case "snapshots":
              code_override = 0;
              break;
          }

          child.kill();
        }
      });

      child.on("close", function (code) {
        const result = { code, stdout, stderr, timeout: false };

        // first attempt: whole stdout is a single JSON document
        if (!result.parsed) {
          try {
            result.parsed = JSON.parse(result.stdout);
          } catch (err) {}
        }

        // second attempt: newline-delimited JSON (one document per line)
        if (!result.parsed) {
          try {
            const parsed = [];
            for (const line of result.stdout.split("\n")) {
              if (!line) {
                continue;
              }
              parsed.push(JSON.parse(line.trim()));
            }
            result.parsed = parsed;
          } catch (err) {}
        }

        /**
         * normalize array responses in scenarios where not enough came
         * through to add newlines
         */
        if (result.parsed && options.operation == "backup") {
          if (!Array.isArray(result.parsed)) {
            result.parsed = [result.parsed];
          }
        }

        // A kill() triggered by the stderr short-circuit yields code=null;
        // substitute the override and keep result.code in sync. (BUGFIX:
        // previously result.code stayed null, so forget()'s `err.code == 1`
        // check never matched and its []-return path was unreachable.)
        if (code == null && code_override != null) {
          code = code_override;
          result.code = code;
        }

        // timeout / killed-without-override scenario
        if (code === null) {
          result.timeout = true;
          reject(result);
          return; // BUGFIX: previously fell through and also resolved
        }

        if (code) {
          reject(result);
        } else {
          resolve(result);
        }
      });
    });
  }
}
|
||||||
|
|
||||||
|
// Public API: expose the Restic wrapper class as a CommonJS named export.
module.exports.Restic = Restic;
|
||||||
Loading…
Reference in New Issue