Compare commits

master..v1.0.0

No commits in common. "master" and "v1.0.0" have entirely different histories.

184 changed files with 3460 additions and 41750 deletions

View File

@@ -1,9 +1,8 @@
** chart
dev
!/bin examples
!/csi_proto contrib
!/csi_proxy_proto node_modules
!/docker Dockerfile*
!/LICENSE TODO.md
!/package*.json .git
!/src

View File

@@ -1,44 +0,0 @@
#!/bin/bash
set -e
echo "$DOCKER_PASSWORD" | docker login docker.io -u "$DOCKER_USERNAME" --password-stdin
echo "$GHCR_PASSWORD" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin
export DOCKER_ORG="democraticcsi"
export DOCKER_PROJECT="democratic-csi"
export DOCKER_REPO="docker.io/${DOCKER_ORG}/${DOCKER_PROJECT}"
export GHCR_ORG="democratic-csi"
export GHCR_PROJECT="democratic-csi"
export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}"
export MANIFEST_NAME="democratic-csi-combined:${IMAGE_TAG}"
if [[ -n "${IMAGE_TAG}" ]]; then
# create local manifest to work with
buildah manifest rm "${MANIFEST_NAME}" || true
buildah manifest create "${MANIFEST_NAME}"
# add all the existing linux data to the manifest
buildah manifest add "${MANIFEST_NAME}" --all "${DOCKER_REPO}:${IMAGE_TAG}"
buildah manifest inspect "${MANIFEST_NAME}"
# import pre-built images
buildah pull docker-archive:democratic-csi-windows-ltsc2019.tar
buildah pull docker-archive:democratic-csi-windows-ltsc2022.tar
# add pre-built images to manifest
buildah manifest add "${MANIFEST_NAME}" democratic-csi-windows:${GITHUB_RUN_ID}-ltsc2019
buildah manifest add "${MANIFEST_NAME}" democratic-csi-windows:${GITHUB_RUN_ID}-ltsc2022
buildah manifest inspect "${MANIFEST_NAME}"
# push manifest
buildah manifest push --all "${MANIFEST_NAME}" docker://${DOCKER_REPO}:${IMAGE_TAG}
buildah manifest push --all "${MANIFEST_NAME}" docker://${GHCR_REPO}:${IMAGE_TAG}
# cleanup
buildah manifest rm "${MANIFEST_NAME}" || true
else
:
fi
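
For local testing this release script can be exercised roughly as below; judging by the `push-docker-windows` job later in this diff, the file is presumably `.github/bin/docker-release-windows.sh`. Every value shown is a placeholder (in CI they come from the workflow env and repository secrets), and the two Windows image tars must already have been produced and downloaded:

```bash
# hypothetical local invocation -- all values are placeholders
export DOCKER_USERNAME="example-user"
export DOCKER_PASSWORD="example-password"
export GHCR_USERNAME="example-user"
export GHCR_PASSWORD="example-password"
export GITHUB_RUN_ID="1234567890"   # must match the tag baked into the image tars
export IMAGE_TAG="next"             # manifest tag to publish

# the pre-built Windows image archives are expected in the working directory
ls democratic-csi-windows-ltsc2019.tar democratic-csi-windows-ltsc2022.tar

.github/bin/docker-release-windows.sh
```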

View File

@@ -1,23 +1,25 @@
#!/bin/bash #!/bin/bash
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
echo "$GHCR_PASSWORD" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin
export DOCKER_ORG="democraticcsi" export DOCKER_ORG="democraticcsi"
export DOCKER_PROJECT="democratic-csi" export DOCKER_PROJECT="democratic-csi"
export DOCKER_REPO="${DOCKER_ORG}/${DOCKER_PROJECT}" export DOCKER_REPO="${DOCKER_ORG}/${DOCKER_PROJECT}"
export GHCR_ORG="democratic-csi" if [[ $GITHUB_REF == refs/tags/* ]]; then
export GHCR_PROJECT="democratic-csi" export GIT_TAG=${GITHUB_REF#refs/tags/}
export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}" else
export GIT_BRANCH=${GITHUB_REF#refs/heads/}
fi
if [[ -n "${IMAGE_TAG}" ]]; then if [[ -n "${GIT_TAG}" ]]; then
# -t ${GHCR_REPO}:${IMAGE_TAG} docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_TAG} .
docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \ elif [[ -n "${GIT_BRANCH}" ]]; then
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \ if [[ "${GIT_BRANCH}" == "master" ]]; then
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \ docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:latest .
--build-arg OBJECTIVEFS_DOWNLOAD_ID=${OBJECTIVEFS_DOWNLOAD_ID} \ else
. docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_BRANCH} .
fi
else else
: :
fi fi

View File

@@ -1,5 +1,3 @@
# https://www.truenas.com/software-status/
name: CI name: CI
on: on:
@@ -11,451 +9,23 @@ on:
- next - next
jobs: jobs:
cancel-previous-runs: build:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: Cancel Previous Runs - name: Cancel Previous Runs
uses: styfle/cancel-workflow-action@0.12.1 uses: styfle/cancel-workflow-action@0.6.0
with: with:
access_token: ${{ github.token }} access_token: ${{ github.token }}
- uses: actions/checkout@v2
build-npm-linux-amd64:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
- shell: bash
name: npm install
run: |
ci/bin/build.sh
- name: upload build
uses: actions/upload-artifact@v4
with:
name: node-modules-linux-amd64
path: node_modules-linux-amd64.tar.gz
retention-days: 1
build-npm-windows-amd64:
runs-on: windows-2022
steps:
- uses: actions/checkout@v4
- uses: actions/setup-node@v4
with:
node-version: 20
- shell: pwsh
name: npm install
run: |
ci\bin\build.ps1
- name: upload build
uses: actions/upload-artifact@v4
with:
name: node-modules-windows-amd64
path: node_modules-windows-amd64.tar.gz
retention-days: 1
csi-sanity-synology-dsm6:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- synlogy/dsm6/iscsi.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-synology
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
SYNOLOGY_HOST: ${{ secrets.SANITY_SYNOLOGY_DSM6_HOST }}
SYNOLOGY_PORT: ${{ secrets.SANITY_SYNOLOGY_DSM6_PORT }}
SYNOLOGY_USERNAME: ${{ secrets.SANITY_SYNOLOGY_USERNAME }}
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
csi-sanity-synology-dsm7:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- synlogy/dsm7/iscsi.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-synology
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
SYNOLOGY_HOST: ${{ secrets.SANITY_SYNOLOGY_DSM7_HOST }}
SYNOLOGY_PORT: ${{ secrets.SANITY_SYNOLOGY_DSM7_PORT }}
SYNOLOGY_USERNAME: ${{ secrets.SANITY_SYNOLOGY_USERNAME }}
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
csi-sanity-truenas-scale-24_04:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- truenas/scale/24.04/scale-iscsi.yaml
- truenas/scale/24.04/scale-nfs.yaml
# 80 char limit
- truenas/scale/24.04/scale-smb.yaml
runs-on:
- self-hosted
- Linux
- X64
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_SCALE_24_04_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
# ssh-based drivers
csi-sanity-truenas-core-13_0:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- truenas/core/13.0/core-iscsi.yaml
- truenas/core/13.0/core-nfs.yaml
# 80 char limit
- truenas/core/13.0/core-smb.yaml
runs-on:
- self-hosted
- Linux
- X64
#- csi-sanity-truenas
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
TRUENAS_HOST: ${{ secrets.SANITY_TRUENAS_CORE_13_0_HOST }}
TRUENAS_USERNAME: ${{ secrets.SANITY_TRUENAS_USERNAME }}
TRUENAS_PASSWORD: ${{ secrets.SANITY_TRUENAS_PASSWORD }}
# ssh-based drivers
csi-sanity-zfs-generic:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
max-parallel: 1
matrix:
config:
- zfs-generic/iscsi.yaml
- zfs-generic/nfs.yaml
- zfs-generic/smb.yaml
- zfs-generic/nvmeof.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-zfs-generic
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
# client drivers
csi-sanity-objectivefs:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- objectivefs/objectivefs.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
OBJECTIVEFS_POOL: ${{ secrets.SANITY_OBJECTIVEFS_POOL }}
OBJECTIVEFS_LICENSE: ${{ secrets.SANITY_OBJECTIVEFS_LICENSE }}
OBJECTIVEFS_OBJECTSTORE: ${{ secrets.SANITY_OBJECTIVEFS_OBJECTSTORE }}
OBJECTIVEFS_ENDPOINT_PROTOCOL: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PROTOCOL }}
OBJECTIVEFS_ENDPOINT_HOST: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_HOST }}
OBJECTIVEFS_ENDPOINT_PORT: ${{ secrets.SANITY_OBJECTIVEFS_ENDPOINT_PORT }}
OBJECTIVEFS_SECRET_KEY: ${{ secrets.SANITY_OBJECTIVEFS_SECRET_KEY }}
OBJECTIVEFS_ACCESS_KEY: ${{ secrets.SANITY_OBJECTIVEFS_ACCESS_KEY }}
OBJECTIVEFS_PASSPHRASE: ${{ secrets.SANITY_OBJECTIVEFS_PASSPHRASE }}
# these secrets need to match the above secrets for staging/etc
CSI_SANITY_SECRETS: /root/csi-secrets/objectivefs-secrets.yaml
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
# client drivers
csi-sanity-client:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- client/nfs.yaml
- client/smb.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
SHARE_NAME: tank_client_smb
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
csi-sanity-client-windows:
needs:
- build-npm-windows-amd64
strategy:
fail-fast: false
matrix:
config:
- client\smb.yaml
runs-on:
- self-hosted
- Windows
- X64
- csi-sanity-client
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-windows-amd64
- name: csi-sanity
run: |
# run tests
ci\bin\run.ps1
env:
TEMPLATE_CONFIG_FILE: ".\\ci\\configs\\${{ matrix.config }}"
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
SHARE_NAME: tank_client_smb
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
# zfs-local drivers
csi-sanity-zfs-local:
needs:
- build-npm-linux-amd64
strategy:
fail-fast: false
matrix:
config:
- zfs-local/zvol.yaml
- zfs-local/dataset.yaml
runs-on:
- self-hosted
- Linux
- X64
- csi-sanity-zfs-local
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-linux-amd64
- name: csi-sanity
run: |
# run tests
ci/bin/run.sh
env:
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
# local-hostpath driver
csi-sanity-local-hostpath:
needs:
- build-npm-linux-amd64
- build-npm-windows-amd64
strategy:
fail-fast: false
matrix:
os: [Linux, Windows]
include:
- os: Linux
npmartifact: node-modules-linux-amd64
template: "./ci/configs/local-hostpath/basic.yaml"
run: |
# run tests
ci/bin/run.sh
- os: Windows
npmartifact: node-modules-windows-amd64
template: ".\\ci\\configs\\local-hostpath\\basic.yaml"
run: |
# run tests
ci\bin\run.ps1
runs-on:
- self-hosted
- ${{ matrix.os }}
- X64
- csi-sanity-local-hostpath
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: ${{ matrix.npmartifact }}
- name: csi-sanity
run: ${{ matrix.run }}
env:
TEMPLATE_CONFIG_FILE: "${{ matrix.template }}"
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
csi-sanity-windows-node:
needs:
- build-npm-windows-amd64
strategy:
fail-fast: false
matrix:
config:
- windows\iscsi.yaml
- windows\smb.yaml
runs-on:
- self-hosted
- Windows
- X64
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: node-modules-windows-amd64
- name: csi-sanity
run: |
# run tests
ci\bin\run.ps1
env:
TEMPLATE_CONFIG_FILE: ".\\ci\\configs\\${{ matrix.config }}"
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
CSI_SANITY_FOCUS: "Node Service"
determine-image-tag:
runs-on: ubuntu-latest
outputs:
tag: ${{ steps.tag.outputs.tag }}
steps:
- id: tag
run: |
if [[ $GITHUB_REF == refs/tags/* ]]; then
export GIT_TAG=${GITHUB_REF#refs/tags/}
else
export GIT_BRANCH=${GITHUB_REF#refs/heads/}
fi
if [[ -n "${GIT_TAG}" ]]; then
echo "::set-output name=tag::${GIT_TAG}"
elif [[ -n "${GIT_BRANCH}" ]]; then
if [[ "${GIT_BRANCH}" == "master" ]]; then
echo "::set-output name=tag::latest"
else
echo "::set-output name=tag::${GIT_BRANCH}"
fi
else
:
fi
build-docker-linux:
needs:
- determine-image-tag
- csi-sanity-synology-dsm6
- csi-sanity-synology-dsm7
- csi-sanity-truenas-scale-24_04
- csi-sanity-truenas-core-13_0
- csi-sanity-zfs-generic
- csi-sanity-objectivefs
- csi-sanity-client
- csi-sanity-client-windows
- csi-sanity-zfs-local
- csi-sanity-local-hostpath
- csi-sanity-windows-node
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: docker build - name: docker build
run: | run: |
export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64") export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
mkdir -p ~/.docker/cli-plugins/ mkdir -p ~/.docker/cli-plugins/
wget -qO ~/.docker/cli-plugins/docker-buildx https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-${ARCH} wget -qO ~/.docker/cli-plugins/docker-buildx https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-${ARCH}
chmod a+x ~/.docker/cli-plugins/docker-buildx chmod a+x ~/.docker/cli-plugins/docker-buildx
docker info docker info
docker buildx version docker buildx version
docker buildx ls
docker run --rm --privileged multiarch/qemu-user-static --reset -p yes docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
docker buildx create --name xbuilder --use docker buildx create --name xbuilder --use
docker buildx inspect --bootstrap docker buildx inspect --bootstrap
@@ -464,83 +34,5 @@ jobs:
env: env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }} DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
OBJECTIVEFS_DOWNLOAD_ID: ${{ secrets.OBJECTIVEFS_DOWNLOAD_ID }}
DOCKER_CLI_EXPERIMENTAL: enabled DOCKER_CLI_EXPERIMENTAL: enabled
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm/v7,linux/arm64
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}
build-docker-windows:
needs:
- csi-sanity-synology-dsm6
- csi-sanity-synology-dsm7
- csi-sanity-truenas-scale-24_04
- csi-sanity-truenas-core-13_0
- csi-sanity-zfs-generic
- csi-sanity-objectivefs
- csi-sanity-client
- csi-sanity-client-windows
- csi-sanity-zfs-local
- csi-sanity-local-hostpath
- csi-sanity-windows-node
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [windows-2019, windows-2022]
include:
- os: windows-2019
core_base_tag: ltsc2019
nano_base_tag: "1809"
file: Dockerfile.Windows
- os: windows-2022
core_base_tag: ltsc2022
nano_base_tag: ltsc2022
file: Dockerfile.Windows
steps:
- uses: actions/checkout@v4
- name: docker build
shell: bash
run: |
docker info
docker build --pull -f ${{ matrix.file }} --build-arg NANO_BASE_TAG=${{ matrix.nano_base_tag }} --build-arg CORE_BASE_TAG=${{ matrix.core_base_tag }} -t democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} \
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \
.
docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
- name: upload image tar
uses: actions/upload-artifact@v4
with:
name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
retention-days: 1
push-docker-windows:
needs:
- build-docker-linux
- build-docker-windows
- determine-image-tag
runs-on:
- self-hosted
- buildah
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
name: democratic-csi-windows-ltsc2019.tar
- uses: actions/download-artifact@v4
with:
name: democratic-csi-windows-ltsc2022.tar
- name: push windows images with buildah
run: |
#.github/bin/install_latest_buildah.sh
buildah version
.github/bin/docker-release-windows.sh
env:
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
DOCKER_CLI_EXPERIMENTAL: enabled
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}

.gitignore (vendored, 2 lines changed)
View File

@@ -1,4 +1,2 @@
**~
node_modules node_modules
dev dev
/ci/bin/*dev*

View File

@@ -1,391 +0,0 @@
# v1.9.4
Released 2024-07-06
- minor doc updates
# v1.9.3
Released 2024-06-01
- minor fixes for objectivefs and iscsi
# v1.9.2
Released 2024-05-23
- minor fixes for objectivefs and iscsi
# v1.9.1
Released 2024-05-06
- fix iscsi hostname lookup regression (#393)
- fix resize issue (#390)
- fix Probe issue (#385)
# v1.9.0
Released 2024-03-26
- new `objectivefs` driver (https://objectivefs.com) support available for x86_64 and arm64
- TrueNAS
- SCALE 24.04 support
- fix `sudo` issue during resize operations (see #295)
- fix version detection logic and default to api version 2 (see #351)
- more robust `Probe` implementation
- container images
- various fixes, improvements, dep upgrades, etc
- update container images to `debian:12` (bookworm)
- bump to nodejs-lts-iron from nodejs-lts-hydrogen
- support csi v1.6.0-v1.9.0
- allow `noop` delete operations (dangerous, only use if you _really_ know what you are doing, see #289)
- properly adhere to the `zvolDedup` and `zvolCompression` settings (see #322)
- `restic` and `kopia` support as a snapshot solution for `local-hostpath` and `*-client` drivers
# v1.8.4
Released 2023-11-09
- allow templatized `volume_id` (dangerous, only use if you _really_ know what you are doing)
- fix TrueNAS SCALE iscsi resize issue
- TrueNAS SCALE 23.10 support
- minor improvements/fixes throughout
- dependency updates
# v1.8.3
Released 2023-04-05
- fix invalid `access_mode` logic (see #287)
# v1.8.2
Released 2023-04-02
- more comprehensive support to manually set `access_modes`
- more intelligent handling of `access_modes` when `access_type=block`
- https://github.com/ceph/ceph-csi/blob/devel/examples/README.md#how-to-test-rbd-multi_node_multi_writer-block-feature
- others? allow this by default
- remove older versions of TrueNAS from ci
# v1.8.1
Released 2023-02-25
- minor fixes
- updated `nvmeof` docs
# v1.8.0
Released 2023-02-23
- `nvmeof` support
# v1.7.7
Released 2022-10-17
- support `csi.access_modes` config value in all zfs-based drivers
- bump deps
# v1.7.6
Released 2022-08-06
- support for `talos.dev` clusters
- dep bumps
# v1.7.5
Released 2022-08-02
- improved ipv6 iscsi support
- allow using `blkid` for filesystem detection on block devices
# v1.7.4
Released 2022-07-29
- improved ipv6 iscsi support
# v1.7.3
Released 2022-07-28
- more stringent block device lookup logic (see #215)
- ipv6 iscsi support
- dependency bumps
- minor fixes throughout
# v1.7.2
Released 2022-06-28
- support for inode stats
- doc updates
- bump deps
# v1.7.1
Released 2022-06-14
- support for the alpha TrueNAS SCALE 22.12
- Fix invalid class reference
# v1.7.0
Released 2022-06-08
The windows release.
- windows smb, iscsi, and local-hostpath support (requires chart `v0.13.0+`)
- ntfs, exfat, vfat fs support
- `zfs-generic-smb` driver
- synology improvements
- DSM7 support
- synology enhancements to allow templates to be configured at various
'levels'
- testing improvements
- support (for testing) generating volume_id from name
- test all the smb variants
- test all nfs/smb client drivers
- misc fixes
- wait for chown/chmod jobs to complete (freenas)
- general improvement to smb behavior throughout
- better logging
- better sudo logic throughout
- minor fixes throughout
- more robust logic for connecting to iscsi devices with partition tables
- massive performance improvement for ssh-based drivers (reusing existing
connection instead of new connection per-command)
- dep bumps
- trimmed container images
- windows container images for 2019 and 2022
# v1.6.3
Released 2022-04-08
- dep bumps
- more secure permissions on the socket file
# v1.6.2
Released 2022-04-06
- dep bumps
- optimize via object instance reuse of various clients etc
- graceful shutdown of the grpc server
# v1.6.1
Released 2022-03-23
- include `rsync` binary in docker image (see #166)
- minor improvements to logging
- bump deps
# v1.6.0
Released 2022-03-16
This is a **massive** release with substantial changes. Ideally this release
should be installed with chart version `>=0.11.0`. Make note that due to the
updated base image from `buster` to `bullseye` that the filesystem tools have
all been updated as well (`mkfs.foo`, `resize2fs`, `fsck.foo`, etc).
To facilitate the removal of the `grpc-uds` package a new sister project was created:
https://github.com/democratic-csi/csi-grpc-proxy
Not all environments require the usage of the proxy, but it is enabled by
default with `helm` chart versions `>=0.11.0`.
- update `nodejs` version to `v16`
- remove dependency on `grpc-uds` package (replaced entirely by
`@grpc/grpc-js`)
- remove dependency on `request` package (replaced by `axios`)
- use native `timeout` functionality for `spawn` operations
- update http clients to use `keep-alive` logic
- add a default 30s `timeout` to `iscsiadm` commands
- base docker image on `bullseye`
- support for `btrfs` as a `fs_type`
- support `s390x` and `ppc64le` docker images
# v1.5.4
Released 2022-03-03
- more descriptive error message for breaking changes introduced in `v1.5.3`
# v1.5.3
Released 2022-03-02
- support for running `freenas-iscsi` and `freenas-nfs` `sudo`-less (see #151)
- BREAKING CHANGE for `freenas-nfs`, if set `datasetPermissionsUser` and
`datasetPermissionsGroup` must be numeric user/group IDs, alpha values such
as `root` and `wheel` will no longer work
- more robust `chown` / `chmod` logic for all zfs drivers
- allow for setting extent comment/description in `freenas-iscsi` and
`freenas-api-iscsi` (see #158)
# v1.5.2
Released 2022-02-24
- proper capacity reporting for `controller-client-common`
# v1.5.1
Released 2022-02-23
- fix ci flakes
- better support running `zfs` commands as non-root with `delegated`
permissions
# v1.5.0
Released 2022-02-23
- massive ci overhaul
- add additional drivers
- add additional TrueNAS server versions
- only build `node_modules` once by using artifacts
- support allow/block listing specific tests
- better logic waiting for driver socket to appear
- introduce `zfs-local-dataset` driver (see #148)
- introduce `zfs-local-zvol` driver (see #148)
- introduce `local-hostpath` driver
- support manually provisioned (`node-manual`) `oneclient` volumes
# v1.4.4
Released 2021-12-11
- better adherence to expected csi behavior when volume request for less than
minimum volume size is requested (see #137)
- avoid unnecessary data copy for `ListVolumes` operation
# v1.4.3
Released 2021-12-01
- more appropriate handling of `size_bytes` for snapshots
- more robust handling of `NodePublishVolume` to ensure the staging path is
actually mounted
- allow control of the `mount` / `umount` / `findmnt` command timeout via
`MOUNT_DEFAULT_TIMEOUT` env var
- minor fix for `zfs-generic-iscsi` with `targetCli` to work-around Ubuntu
18:04 bug (see #127)
# v1.4.2
Released 2021-09-29
- general improvements to help ci
- cover most drivers with ci
# v1.4.1
Released 2021-09-21
- `k8s-csi-cleaner` script (see #81)
- bump deps
# v1.4.0
Released 2021-09-21
- more advanced logic for iscsi naming limits (allowing > 63 chars in certain
circumstances, SCALE, linux, FreeBSD 13+)
- various updates to support running the csi-test tool and conform to expected
responses/behaviors (full conformance for several drivers!)
- default `fs_type` during `NodeStageVolume` when omitted by `CO`
- automatically add `guest` mount option to `cifs` shares when creds are absent
- fix `ListVolumes` and `ListSnapshot` behavior on various `zfs-generic-*` and
`freenas-*` drivers
# v1.3.2
Released 2021-09-09
- fix missing `break` in the `node-manual` driver using `smb` / `cifs`
# v1.3.1
Released 2021-09-08
- support using a template for nfs share comment in `freenas-nfs` and
`freenas-api-nfs` (see #115)
# v1.3.0
Released 2021-09-02
- use `ghcr.io` for images as well as docker hub (#90)
- introduce api-only drivers for freenas (`freenas-api-*`)
- `smb-client` driver which creates folders on an smb share
- `lustre-client` driver which creates folders on a lustre share
attaching to various volumes which have been pre-provisioned by the operator
- `synology-iscsi` driver
- various documentation improvements
- support for csi versions `1.4.0` and `1.5.0`
- reintroduce advanced options that allow control over `fsck` (#85)
- advanced options for customizing `mkfs` commands
- better handling of stale nfs connections
- do not log potentially sensitive data in mount commands
- timeouts on various commands to improve driver operations under adverse
conditions
- various fixes and improvements throughout
- dependency bumps
# v1.2.0
Released 2021-05-12
- add `node-manual` driver
# v1.1.3
Released 2021-04-25
- remove `--force` from unmounts
- proper `iqn` logic for rescans
# v1.1.2
Released 2021-04-12
- fix for hostname based portals
- dependency bumps
# v1.1.1
Released 2021-04-12
- rescan iscsi sessions after login during stage call
# v1.1.0
Released 2021-02-21
- support for csi-v1.3.0
- fix a snapshot issue when requested with specific `snapshot_id`
# v1.0.1
Released 2021-01-29
- targetCli fixes when used in conjunction with `nameTemplate` (see #49)
- multi-stage docker builds to shrink image size dramatically
- using pre-compiled grpc binaries to dramatically speed build times
- dep updates
- remove `fsck` during stage operations due to sig-storage recommendations (see #52)
# v1.0.0
Released 2021-01-07
- initial release

View File

@@ -1,4 +1,4 @@
FROM debian:12-slim AS build FROM debian:10-slim AS build
#FROM --platform=$BUILDPLATFORM debian:10-slim AS build #FROM --platform=$BUILDPLATFORM debian:10-slim AS build
ENV DEBIAN_FRONTEND=noninteractive ENV DEBIAN_FRONTEND=noninteractive
@@ -9,14 +9,14 @@ ARG BUILDPLATFORM
RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM" RUN echo "I am running build on $BUILDPLATFORM, building for $TARGETPLATFORM"
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8 ENV LANG=en_US.utf8
ENV NODE_VERSION=v20.11.1 ENV NODE_VERSION=v12.20.0
ENV NODE_ENV=production #ENV NODE_VERSION=v14.15.1
# install build deps # install build deps
RUN apt-get update && apt-get install -y python3 make cmake gcc g++ RUN apt-get update && apt-get install -y python make gcc g++
# install node # install node
RUN apt-get update && apt-get install -y wget xz-utils RUN apt-get update && apt-get install -y wget xz-utils
@@ -26,13 +26,13 @@ ENV PATH=/usr/local/lib/nodejs/bin:$PATH
# Run as a non-root user # Run as a non-root user
RUN useradd --create-home csi \ RUN useradd --create-home csi \
&& mkdir /home/csi/app \ && mkdir /home/csi/app \
&& chown -R csi: /home/csi && chown -R csi: /home/csi
WORKDIR /home/csi/app WORKDIR /home/csi/app
USER csi USER csi
COPY --chown=csi:csi package*.json ./ COPY package*.json ./
RUN npm install --only=production --grpc_node_binary_host_mirror=https://grpc-uds-binaries.s3-us-west-2.amazonaws.com/debian-buster RUN npm install
COPY --chown=csi:csi . . COPY --chown=csi:csi . .
RUN rm -rf docker RUN rm -rf docker
@@ -40,68 +40,36 @@ RUN rm -rf docker
###################### ######################
# actual image # actual image
###################### ######################
FROM debian:12-slim FROM debian:10-slim
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.licenses MIT
ENV DEBIAN_FRONTEND=noninteractive ENV DEBIAN_FRONTEND=noninteractive
ENV DEMOCRATIC_CSI_IS_CONTAINER=true
ARG TARGETPLATFORM ARG TARGETPLATFORM
ARG BUILDPLATFORM ARG BUILDPLATFORM
ARG OBJECTIVEFS_DOWNLOAD_ID
RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM" RUN echo "I am running on final $BUILDPLATFORM, building for $TARGETPLATFORM"
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \ RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8 ENV LANG=en_US.utf8
ENV NODE_ENV=production
# Workaround for https://github.com/nodejs/node/issues/37219
RUN test $(uname -m) != armv7l || ( \
apt-get update \
&& apt-get install -y libatomic1 \
&& rm -rf /var/lib/apt/lists/* \
)
# install node # install node
#ENV PATH=/usr/local/lib/nodejs/bin:$PATH ENV PATH=/usr/local/lib/nodejs/bin:$PATH
#COPY --from=build /usr/local/lib/nodejs /usr/local/lib/nodejs COPY --from=build /usr/local/lib/nodejs /usr/local/lib/nodejs
COPY --from=build /usr/local/lib/nodejs/bin/node /usr/local/bin/node
# node service requirements # node service requirements
# netbase is required by rpcbind/rpcinfo to work properly # netbase is required by rpcbind/rpcinfo to work properly
# /etc/{services,rpc} are required # /etc/{services,rpc} are required
RUN apt-get update && \ RUN apt-get update && \
apt-get install -y wget netbase zip bzip2 socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync procps util-linux nvme-cli fuse3 && \ apt-get install -y netbase socat e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
ARG RCLONE_VERSION=1.66.0
ADD docker/rclone-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/rclone-installer.sh && rclone-installer.sh
ARG RESTIC_VERSION=0.16.4
ADD docker/restic-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/restic-installer.sh && restic-installer.sh
ARG KOPIA_VERSION=0.16.1
ADD docker/kopia-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/kopia-installer.sh && kopia-installer.sh
# controller requirements # controller requirements
#RUN apt-get update && \ #RUN apt-get update && \
# apt-get install -y ansible && \ # apt-get install -y ansible && \
# rm -rf /var/lib/apt/lists/* # rm -rf /var/lib/apt/lists/*
# install objectivefs
ARG OBJECTIVEFS_VERSION=7.2
ADD docker/objectivefs-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/objectivefs-installer.sh && objectivefs-installer.sh
# install wrappers # install wrappers
ADD docker/iscsiadm /usr/local/sbin ADD docker/iscsiadm /usr/local/sbin
RUN chmod +x /usr/local/sbin/iscsiadm RUN chmod +x /usr/local/sbin/iscsiadm
@@ -117,16 +85,9 @@ RUN chmod +x /usr/local/bin/mount
ADD docker/umount /usr/local/bin/umount ADD docker/umount /usr/local/bin/umount
RUN chmod +x /usr/local/bin/umount RUN chmod +x /usr/local/bin/umount
ADD docker/zfs /usr/local/bin/zfs
RUN chmod +x /usr/local/bin/zfs
ADD docker/zpool /usr/local/bin/zpool
RUN chmod +x /usr/local/bin/zpool
ADD docker/oneclient /usr/local/bin/oneclient
RUN chmod +x /usr/local/bin/oneclient
# Run as a non-root user # Run as a non-root user
RUN useradd --create-home csi \ RUN useradd --create-home csi \
&& chown -R csi: /home/csi && chown -R csi: /home/csi
COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app COPY --from=build --chown=csi:csi /home/csi/app /home/csi/app

View File

@@ -1,100 +0,0 @@
#
# https://github.com/kubernetes/kubernetes/blob/master/test/images/windows/powershell-helper/Dockerfile_windows
# https://github.com/kubernetes/kubernetes/blob/master/test/images/busybox/Dockerfile_windows
# https://github.com/kubernetes/kubernetes/tree/master/test/images#windows-test-images-considerations
# https://stefanscherer.github.io/find-dependencies-in-windows-containers/
#
# docker build --build-arg NANO_BASE_TAG=1809 --build-arg CORE_BASE_TAG=ltsc2019 -t foobar -f Dockerfile.Windows .
# docker run --rm -ti --entrypoint powershell foobar
# docker run --rm foobar
# docker save foobar -o foobar.tar
# buildah pull docker-archive:foobar.tar
# mcr.microsoft.com/windows/servercore:ltsc2019
# mcr.microsoft.com/windows/nanoserver:1809
ARG NANO_BASE_TAG
ARG CORE_BASE_TAG
FROM mcr.microsoft.com/windows/servercore:${CORE_BASE_TAG} as powershell
# install powershell
ENV PS_VERSION=6.2.7
ADD https://github.com/PowerShell/PowerShell/releases/download/v$PS_VERSION/PowerShell-$PS_VERSION-win-x64.zip /PowerShell/powershell.zip
RUN cd C:\PowerShell &\
tar.exe -xf powershell.zip &\
del powershell.zip &\
mklink powershell.exe pwsh.exe
FROM mcr.microsoft.com/windows/servercore:${CORE_BASE_TAG} as build
SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]
#ENV GPG_VERSION 4.0.2
ENV GPG_VERSION 2.3.4
RUN Invoke-WebRequest $('https://files.gpg4win.org/gpg4win-vanilla-{0}.exe' -f $env:GPG_VERSION) -OutFile 'gpg4win.exe' -UseBasicParsing ; \
Start-Process .\gpg4win.exe -ArgumentList '/S' -NoNewWindow -Wait
# https://github.com/nodejs/node#release-keys
RUN @( \
'4ED778F539E3634C779C87C6D7062848A1AB005C', \
'141F07595B7B3FFE74309A937405533BE57C7D57', \
'94AE36675C464D64BAFA68DD7434390BDBE9B9C5', \
'74F12602B6F1C4E913FAA37AD3A89613643B6201', \
'71DCFD284A79C3B38668286BC97EC7A07EDE3FC1', \
'61FC681DFB92A079F1685E77973F295594EC4689', \
'8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600', \
'C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8', \
'C82FA3AE1CBEDC6BE46B9360C43CEC45C17AB93C', \
'DD8F2338BAE7501E3DD5AC78C273792F7D83545D', \
'A48C2BEE680E841632CD4E44F07496B3EB3C1762', \
'108F52B48DB57BB0CC439B2997B01419BD92F80A', \
'B9E2F5981AA6E0CD28160D9FF13993A75599653C' \
) | foreach { \
gpg --keyserver hkps://keys.openpgp.org --recv-keys $_ ; \
}
ENV NODE_VERSION 16.18.0
RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/SHASUMS256.txt.asc' -f $env:NODE_VERSION) -OutFile 'SHASUMS256.txt.asc' -UseBasicParsing ;
#RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/SHASUMS256.txt.asc' -f $env:NODE_VERSION) -OutFile 'SHASUMS256.txt.asc' -UseBasicParsing ; \
# gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc
#gpg --verify SHASUMS256.txt.sig SHASUMS256.txt
RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/node-v{0}-win-x64.zip' -f $env:NODE_VERSION) -OutFile 'node.zip' -UseBasicParsing ; \
$sum = $(cat SHASUMS256.txt.asc | sls $(' node-v{0}-win-x64.zip' -f $env:NODE_VERSION)) -Split ' ' ; \
if ((Get-FileHash node.zip -Algorithm sha256).Hash -ne $sum[0]) { Write-Error 'SHA256 mismatch' } ; \
Expand-Archive node.zip -DestinationPath C:\ ; \
Rename-Item -Path $('C:\node-v{0}-win-x64' -f $env:NODE_VERSION) -NewName 'C:\nodejs'
#RUN setx /M PATH "%PATH%;C:\nodejs"
RUN setx /M PATH $(${Env:PATH} + \";C:\nodejs\")
RUN node --version; npm --version;
RUN mkdir /app
WORKDIR /app
COPY package*.json ./
RUN npm install --only=production; ls /
COPY . .
FROM mcr.microsoft.com/windows/nanoserver:${NANO_BASE_TAG}
LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.licenses MIT
# if additional dlls are required can copy like this
#COPY --from=build /Windows/System32/nltest.exe /Windows/System32/nltest.exe
COPY --from=build /app /app
WORKDIR /app
# this works for both host-process and non-host-process container semantics
COPY --from=build /nodejs/node.exe ./bin
ENTRYPOINT [ "bin/node.exe", "--expose-gc", "bin/democratic-csi" ]

Dockerfile.unified (new file, 59 lines)
View File

@@ -0,0 +1,59 @@
FROM debian:10-slim
ENV DEBIAN_FRONTEND=noninteractive
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/* \
&& localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
ENV LANG=en_US.utf8 NODE_VERSION=v12.20.0
RUN echo "I am running on $BUILDPLATFORM, building for $TARGETPLATFORM"
# install node
RUN apt-get update && apt-get install -y wget xz-utils
ADD docker/node-installer.sh /usr/local/sbin
RUN chmod +x /usr/local/sbin/node-installer.sh && node-installer.sh
ENV PATH=/usr/local/lib/nodejs/bin:$PATH
# node service requirements
RUN apt-get update && \
apt-get install -y e2fsprogs xfsprogs fatresize dosfstools nfs-common cifs-utils sudo && \
rm -rf /var/lib/apt/lists/*
# controller requirements
RUN apt-get update && \
apt-get install -y ansible && \
rm -rf /var/lib/apt/lists/*
# npm requirements
# gcc and g++ required by grpc-uds until proper upstream support
RUN apt-get update && \
apt-get install -y python make gcc g++ && \
rm -rf /var/lib/apt/lists/*
# install wrappers
ADD docker/iscsiadm /usr/local/sbin
RUN chmod +x /usr/local/sbin/iscsiadm
ADD docker/multipath /usr/local/sbin
RUN chmod +x /usr/local/sbin/multipath
# Run as a non-root user
RUN useradd --create-home csi \
&& mkdir /home/csi/app \
&& chown -R csi: /home/csi
WORKDIR /home/csi/app
USER csi
COPY package*.json ./
RUN npm install
COPY --chown=csi:csi . .
USER root
EXPOSE 50051
ENTRYPOINT [ "bin/democratic-csi" ]

README.md (550 lines changed)
View File

@@ -1,6 +1,5 @@
![Image](https://img.shields.io/docker/pulls/democraticcsi/democratic-csi.svg) ![Image](https://img.shields.io/docker/pulls/democraticcsi/democratic-csi.svg)
![Image](https://img.shields.io/github/actions/workflow/status/democratic-csi/democratic-csi/main.yml?branch=master&style=flat-square) ![Image](https://img.shields.io/github/workflow/status/democratic-csi/democratic-csi/CI?style=flat-square)
[![Artifact Hub](https://img.shields.io/endpoint?url=https://artifacthub.io/badge/repository/democratic-csi)](https://artifacthub.io/packages/search?repo=democratic-csi)
# Introduction # Introduction
@@ -19,28 +18,10 @@ have access to resizing, snapshots, clones, etc functionality.
- `freenas-nfs` (manages zfs datasets to share over nfs) - `freenas-nfs` (manages zfs datasets to share over nfs)
- `freenas-iscsi` (manages zfs zvols to share over iscsi) - `freenas-iscsi` (manages zfs zvols to share over iscsi)
- `freenas-smb` (manages zfs datasets to share over smb) - `freenas-smb` (manages zfs datasets to share over smb)
- `freenas-api-nfs` experimental use with SCALE only (manages zfs datasets to share over nfs)
- `freenas-api-iscsi` experimental use with SCALE only (manages zfs zvols to share over iscsi)
- `freenas-api-smb` experimental use with SCALE only (manages zfs datasets to share over smb)
- `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu) - `zfs-generic-nfs` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu) - `zfs-generic-iscsi` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-smb` (works with any ZoL installation...ie: Ubuntu)
- `zfs-generic-nvmeof` (works with any ZoL installation...ie: Ubuntu)
- `zfs-local-ephemeral-inline` (provisions node-local zfs datasets) - `zfs-local-ephemeral-inline` (provisions node-local zfs datasets)
- `zfs-local-dataset` (provision node-local volume as dataset) - `nfs-client` (crudely provisions storage using a shared nfs share/directory for all volumes)
- `zfs-local-zvol` (provision node-local volume as zvol)
- `synology-iscsi` experimental (manages volumes to share over iscsi)
- `objectivefs` (manages objectivefs volumes)
- `lustre-client` (crudely provisions storage using a shared lustre
share/directory for all volumes)
- `nfs-client` (crudely provisions storage using a shared nfs share/directory
for all volumes)
- `smb-client` (crudely provisions storage using a shared smb share/directory
for all volumes)
- `local-hostpath` (crudely provisions node-local directories)
- `node-manual` (allows connecting to manually created smb, nfs, lustre,
oneclient, nvmeof, and iscsi volumes, see sample PVs in the `examples`
directory)
- framework for developing `csi` drivers - framework for developing `csi` drivers
If you have any interest in providing a `csi` driver, simply open an issue to If you have any interest in providing a `csi` driver, simply open an issue to
@@ -56,41 +37,15 @@ Predominantly 3 things are needed:
- deploy the driver into the cluster (`helm` chart provided with sample - deploy the driver into the cluster (`helm` chart provided with sample
`values.yaml`) `values.yaml`)
## Community Guides ## Guides
- https://jonathangazeley.com/2021/01/05/using-truenas-to-provide-persistent-storage-for-kubernetes/ - https://jonathangazeley.com/2021/01/05/using-truenas-to-provide-persistent-storage-for-kubernetes/
- https://www.lisenet.com/2021/moving-to-truenas-and-democratic-csi-for-kubernetes-persistent-storage/
- https://gist.github.com/admun/4372899f20421a947b7544e5fc9f9117 (migrating
from `nfs-client-provisioner` to `democratic-csi`)
- https://gist.github.com/deefdragon/d58a4210622ff64088bd62a5d8a4e8cc
(migrating between storage classes using `velero`)
- https://github.com/fenio/k8s-truenas (NFS/iSCSI over API with TrueNAS Scale)
## Node Prep ## Node Prep
You should install/configure the requirements for both nfs and iscsi. You should install/configure the requirements for both nfs and iscsi.
### cifs Follow the instructions here: https://netapp-trident.readthedocs.io/en/stable-v20.04/kubernetes/operations/tasks/worker.html
```bash
# RHEL / CentOS
sudo yum install -y cifs-utils
# Ubuntu / Debian
sudo apt-get install -y cifs-utils
```
### nfs
```bash
# RHEL / CentOS
sudo yum install -y nfs-utils
# Ubuntu / Debian
sudo apt-get install -y nfs-common
```
### iscsi
Note that `multipath` is supported for the `iscsi`-based drivers. Simply setup Note that `multipath` is supported for the `iscsi`-based drivers. Simply setup
multipath to your liking and set multiple portals in the config as appropriate. multipath to your liking and set multiple portals in the config as appropriate.
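
As an illustration of "multiple portals in the config", an iscsi driver config could look roughly like the sketch below; the key names mirror the sample configs in the `examples` directory but are not verified against this revision, and the addresses are placeholders:

```yaml
# hypothetical excerpt of an iscsi driver config
iscsi:
  targetPortal: "192.168.10.10:3260"
  # extra portals allow the node to establish additional sessions for multipath
  targetPortals:
    - "192.168.11.10:3260"
    - "192.168.12.10:3260"
```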
@@ -99,135 +54,14 @@ If you are running Kubernetes with rancher/rke please see the following:
- https://github.com/rancher/rke/issues/1846 - https://github.com/rancher/rke/issues/1846
#### RHEL / CentOS ### freenas-smb
```bash If using with Windows based machines you may need to enable guest access (even
# Install the following system packages if you are connecting with credentials)
sudo yum install -y lsscsi iscsi-initiator-utils sg3_utils device-mapper-multipath
# Enable multipathing
sudo mpathconf --enable --with_multipathd y
# Ensure that iscsid and multipathd are running
sudo systemctl enable iscsid multipathd
sudo systemctl start iscsid multipathd
# Start and enable iscsi
sudo systemctl enable iscsi
sudo systemctl start iscsi
```
#### Ubuntu / Debian
``` ```
# Install the following system packages Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters AllowInsecureGuestAuth -Value 1
sudo apt-get install -y open-iscsi lsscsi sg3-utils multipath-tools scsitools Restart-Service LanmanWorkstation -Force
# Enable multipathing
sudo tee /etc/multipath.conf <<-'EOF'
defaults {
user_friendly_names yes
find_multipaths yes
}
EOF
sudo systemctl enable multipath-tools.service
sudo service multipath-tools restart
# Ensure that open-iscsi and multipath-tools are enabled and running
sudo systemctl status multipath-tools
sudo systemctl enable open-iscsi.service
sudo service open-iscsi start
sudo systemctl status open-iscsi
```
#### [Talos](https://www.talos.dev/)
To use iscsi storage in a Talos Kubernetes cluster the following steps are needed; they are similar to the ones explained in https://www.talos.dev/v1.1/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/#patching-the-jiva-installation
##### Patch nodes
Since Talos does not have iscsi support by default, the iscsi extension is needed.
create a `patch.yaml` file with
```yaml
- op: add
path: /machine/install/extensions
value:
- image: ghcr.io/siderolabs/iscsi-tools:v0.1.1
```
and apply the patch across all of your nodes
```bash
talosctl -e <endpoint ip/hostname> -n <node ip/hostname> patch mc -p @patch.yaml
```
The extension will not activate until you "upgrade" the nodes; even if there is no update available, use the latest version of the Talos installer.
VERIFY THE TALOS VERSION IN THIS COMMAND BEFORE RUNNING IT AND READ THE [OpenEBS Jiva guide](https://www.talos.dev/v1.1/kubernetes-guides/configuration/replicated-local-storage-with-openebs-jiva/#patching-the-jiva-installation).
upgrade all of the nodes in the cluster to get the extension
```bash
talosctl -e <endpoint ip/hostname> -n <node ip/hostname> upgrade --image=ghcr.io/siderolabs/installer:v1.1.1
```
in your `values.yaml` file make sure to enable these settings
```yaml
node:
hostPID: true
driver:
extraEnv:
- name: ISCSIADM_HOST_STRATEGY
value: nsenter
- name: ISCSIADM_HOST_PATH
value: /usr/local/sbin/iscsiadm
iscsiDirHostPath: /usr/local/etc/iscsi
iscsiDirHostPathType: ""
```
and continue your democratic-csi installation as usual with other iscsi drivers.
#### Privileged Namespace
democratic-csi requires privileged access to the nodes, so the namespace should allow for privileged pods. One way of doing it is via [namespace labels](https://kubernetes.io/docs/tasks/configure-pod-container/enforce-standards-namespace-labels/).
Add the following label to the democratic-csi installation namespace `pod-security.kubernetes.io/enforce=privileged`
```
kubectl label --overwrite namespace democratic-csi pod-security.kubernetes.io/enforce=privileged
```
### nvmeof
```bash
# not required but likely helpful (tools are included in the democratic images
# so not needed on the host)
apt-get install -y nvme-cli
# get the nvme fabric modules
apt-get install linux-generic
# ensure the nvmeof modules get loaded at boot
cat <<EOF > /etc/modules-load.d/nvme.conf
nvme
nvme-tcp
nvme-fc
nvme-rdma
EOF
# load the modules immediately
modprobe nvme
modprobe nvme-tcp
modprobe nvme-fc
modprobe nvme-rdma
# nvme has native multipath or can use DM multipath
# democratic-csi will gracefully handle either configuration
# RedHat recommends DM multipath (nvme_core.multipath=N)
cat /sys/module/nvme_core/parameters/multipath
# kernel arg to enable/disable native multipath
nvme_core.multipath=N
``` ```
### zfs-local-ephemeral-inline ### zfs-local-ephemeral-inline
@@ -241,178 +75,37 @@ necessary.
- https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md - https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
- https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
### zfs-local-{dataset,zvol}
This `driver` provisions node-local storage. Each node should have an
identically named zfs pool created and available to the `driver`. Note, this is
_NOT_ the same thing as using the docker zfs storage driver (although the same
pool could be used). Nodes should have the standard `zfs` utilities installed.
In the name of ease-of-use these drivers by default report `MULTI_NODE` support
(`ReadWriteMany` in k8s) however the volumes will implicitly only work on the
node where originally provisioned. Topology constraints manage this in an
automated fashion preventing any undesirable behavior. So while you may
provision `MULTI_NODE` / `RWX` volumes, any workloads using the volume will
always land on a single node and that node will always be the node where the
volume is/was provisioned.
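
A minimal preparation sketch, assuming the driver is pointed at a pool named `tank` (the pool name and device are placeholders; any pool layout works as long as every node uses the same name):

```bash
# run on each node; /dev/sdb is a placeholder device
sudo zpool create tank /dev/sdb

# confirm the standard zfs utilities are present and the pool is visible
zpool status tank
zfs list tank
```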
### local-hostpath
This `driver` provisions node-local storage. Each node should have an
identically named folder where volumes will be created.
In the name of ease-of-use these drivers by default report `MULTI_NODE` support
(`ReadWriteMany` in k8s) however the volumes will implicitly only work on the
node where originally provisioned. Topology constraints manage this in an
automated fashion preventing any undesirable behavior. So while you may
provision `MULTI_NODE` / `RWX` volumes, any workloads using the volume will
always land on a single node and that node will always be the node where the
volume is/was provisioned.
The nature of this `driver` also prevents the enforcement of quotas. In short
the requested volume size is generally ignored.
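
A minimal preparation sketch, assuming the driver is configured with a base path of `/var/lib/csi-local-hostpath` (the path is a placeholder; use whatever base-path setting your driver config specifies):

```bash
# run on each node; the directory must be identically named everywhere
sudo mkdir -p /var/lib/csi-local-hostpath

# quotas are not enforced by this driver, so watch the backing filesystem yourself
df -h /var/lib/csi-local-hostpath
```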
### windows
Support for Windows was introduced in `v1.7.0`. Currently support is limited
to kubernetes nodes capable of running `HostProcess` containers. Support was
tested against `Windows Server 2019` using `rke2-v1.24`. Currently any of the
`-smb` and `-iscsi` drivers will work. Support for `ntfs` was added to the
linux nodes as well (using the `ntfs3` driver) so volumes created can be
utilized by nodes with either operating system (in the case of `cifs` by both
simultaneously).
If using any `-iscsi` driver be sure your iqns are always fully lower-case by
default (https://github.com/PowerShell/PowerShell/issues/17306).
Due to current limits in the kubernetes tooling it is not possible to use the
`local-hostpath` driver but support is implemented in this project and will
work as soon as kubernetes support is available.
```powershell
# ensure all updates are installed
# enable the container feature
Enable-WindowsOptionalFeature -Online -FeatureName Containers -All
# install a HostProcess compatible kubernetes
# smb support
# If using with Windows based machines you may need to enable guest access
# (even if you are connecting with credentials)
Set-ItemProperty HKLM:\SYSTEM\CurrentControlSet\Services\LanmanWorkstation\Parameters AllowInsecureGuestAuth -Value 1
Restart-Service LanmanWorkstation -Force
# iscsi
# enable iscsi service and mpio as appropriate
Get-Service -Name MSiSCSI
Set-Service -Name MSiSCSI -StartupType Automatic
Start-Service -Name MSiSCSI
Get-Service -Name MSiSCSI
# mpio
Get-WindowsFeature -Name 'Multipath-IO'
Add-WindowsFeature -Name 'Multipath-IO'
Enable-MSDSMAutomaticClaim -BusType "iSCSI"
Disable-MSDSMAutomaticClaim -BusType "iSCSI"
Get-MSDSMGlobalDefaultLoadBalancePolicy
Set-MSDSMGlobalLoadBalancePolicy -Policy RR
```
- https://kubernetes.io/blog/2021/08/16/windows-hostprocess-containers/
- https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/
## Server Prep ## Server Prep
Server preparation depends slightly on which `driver` you are using. Server preparation depends slightly on which `driver` you are using.
### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb, freenas-api-nfs, freenas-api-iscsi, freenas-api-smb) ### FreeNAS (freenas-nfs, freenas-iscsi, freenas-smb)
The recommended version of FreeNAS is 12.0-U2+, however the driver should work The recommended version of FreeNAS is 11.3+, however the driver should work
with much older versions as well. with much older versions as well.
The various `freenas-api-*` drivers are currently EXPERIMENTAL and can only be
used with SCALE 21.08+. Fundamentally these drivers remove the need for `ssh`
connections and do all operations entirely with the TrueNAS api. With that in
mind, any ssh/shell/etc requirements below can be safely ignored. The minimum
volume size through the api is `1G` so beware that volumes requested with a
smaller size will be increased to `1G`. Also note the following known issues:
- https://jira.ixsystems.com/browse/NAS-111870
- https://github.com/democratic-csi/democratic-csi/issues/112
- https://github.com/democratic-csi/democratic-csi/issues/101
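
In practice this means a PVC requesting less than `1G` from a `freenas-api-*` backed storage class will still be provisioned at `1G`, so sizing requests at or above that avoids surprises. A hypothetical example (the storage class name is a placeholder):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: example-pvc
spec:
  accessModes: ["ReadWriteOnce"]
  storageClassName: freenas-api-nfs   # placeholder storage class name
  resources:
    requests:
      storage: 1Gi   # the api-based drivers will not provision anything smaller anyway
```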
Ensure the following services are configured and running:
- ssh (if you use a password for authentication make sure it is allowed) - ssh (if you use a password for authentication make sure it is allowed)
- https://www.truenas.com/community/threads/ssh-access-ssh-rsa-not-in-pubkeyacceptedalgorithms.101715/
- `PubkeyAcceptedAlgorithms +ssh-rsa`
- ensure `zsh`, `bash`, or `sh` is set as the root shell, `csh` gives false errors due to quoting - ensure `zsh`, `bash`, or `sh` is set as the root shell, `csh` gives false errors due to quoting
- nfs - nfs
- iscsi - iscsi
- when using the FreeNAS API concurrently the `/etc/ctl.conf` file on the
- (fixed in 12.0-U2+) when using the FreeNAS API concurrently the server can become invalid, some sample scripts are provided in the
`/etc/ctl.conf` file on the server can become invalid, some sample scripts `contrib` directory to clean things up
are provided in the `contrib` directory to clean things up ie: copy the ie: copy the script to the server and directly and run - `./ctld-config-watchdog-db.sh | logger -t ctld-config-watchdog-db.sh &`
script to the server and directly and run - `./ctld-config-watchdog-db.sh | logger -t ctld-config-watchdog-db.sh &`
please read the scripts and set the variables as appropriate for your server. please read the scripts and set the variables as appropriate for your server.
- ensure you have pre-emptively created portals, initiator groups, auths - ensure you have pre-emptively created portal, group, auth
- make note of the respective IDs (the true ID may not reflect what is
visible in the UI)
- IDs can be visible by clicking the `Edit` link and finding the ID in the
browser address bar
- Optionally you may use the following to retrieve appropriate IDs:
- `curl --header "Accept: application/json" --user root:<password> 'http(s)://<ip>/api/v2.0/iscsi/portal'`
- `curl --header "Accept: application/json" --user root:<password> 'http(s)://<ip>/api/v2.0/iscsi/initiator'`
- `curl --header "Accept: application/json" --user root:<password> 'http(s)://<ip>/api/v2.0/iscsi/auth'`
- The maximum number of volumes is limited to 255 by default on FreeBSD (physical devices such as disks and CD-ROM drives count against this value).
Be sure to properly adjust both [tunables](https://www.freebsd.org/cgi/man.cgi?query=ctl&sektion=4#end) `kern.cam.ctl.max_ports` and `kern.cam.ctl.max_luns` to avoid running out of resources when dynamically provisioning iSCSI volumes on FreeNAS or TrueNAS Core (a sample tunable setting is sketched after this list).
- smb - smb
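
A sketch of adjusting those limits as loader tunables (the values are illustrative; set them via the TrueNAS Tunables UI or `/boot/loader.conf` and reboot for them to take effect):

```
# loader tunables -- illustrative values, size them to your expected volume count
kern.cam.ctl.max_ports=256
kern.cam.ctl.max_luns=1024
```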
If you would prefer you can configure `democratic-csi` to use a In addition, if you want to use a non-root user for the ssh operations you may
non-`root` user when connecting to the FreeNAS server: create a `csi` user and then run `visudo` directly from the console. Make sure
the line for the `csi` user has `NOPASSWD` added (note this can get reset by
FreeNAS if you alter the user via the GUI later):
- Create a non-`root` user (e.g., `csi`) ```
csi ALL=(ALL) NOPASSWD:ALL
- Ensure that user has passwordless `sudo` privileges: ```
```
csi ALL=(ALL) NOPASSWD:ALL
# if on CORE 12.0-u3+ you should be able to do the following
# which will ensure it does not get reset during reboots etc
# at the command prompt
cli
# after you enter the truenas cli and are at that prompt
account user query select=id,username,uid,sudo_nopasswd
# find the `id` of the user you want to update (note, this is distinct from the `uid`)
account user update id=<id> sudo=true
account user update id=<id> sudo_nopasswd=true
# optional if you want to disable password
#account user update id=<id> password_disabled=true
# exit cli by hitting ctrl-d
# confirm sudoers file is appropriate
cat /usr/local/etc/sudoers
```
(note this can get reset by FreeNAS if you alter the user via the
GUI later)
- Instruct `democratic-csi` to use `sudo` by adding the following to
your driver configuration:
```
zfs:
cli:
sudoEnabled: true
```
Starting with TrueNAS CORE 12 it is also possible to use an `apiKey` instead of Starting with TrueNAS CORE 12 it is also possible to use an `apiKey` instead of
the `root` password for the http connection. the `root` password for the http connection.
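For example, the `httpConnection` portion of the driver config can then look roughly like this (placeholder values only; the key is generated in the TrueNAS UI under API Keys):

```
httpConnection:
  protocol: https
  host: truenas.example.com
  port: 443
  # use an API key instead of username/password
  apiKey: 1-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
  allowInsecure: false
```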
@ -425,144 +118,17 @@ Issues to review:
- https://jira.ixsystems.com/browse/NAS-108522 - https://jira.ixsystems.com/browse/NAS-108522
- https://jira.ixsystems.com/browse/NAS-107219 - https://jira.ixsystems.com/browse/NAS-107219
### ZoL (zfs-generic-nfs, zfs-generic-iscsi, zfs-generic-smb, zfs-generic-nvmeof) ### ZoL (zfs-generic-nfs, zfs-generic-iscsi)
Ensure ssh and zfs are installed on the nfs/iscsi server and that you have installed Ensure ssh and zfs are installed on the nfs/iscsi server and that you have installed
`targetcli`. `targetcli`.
The driver executes many commands over an ssh connection. You may consider - `sudo yum install targetcli -y`
disabling all the `motd` details for the ssh user as it can spike the cpu - `sudo apt-get -y install targetcli-fb`
unnecessarily:
- https://askubuntu.com/questions/318592/how-can-i-remove-the-landscape-canonical-com-greeting-from-motd
- https://linuxconfig.org/disable-dynamic-motd-and-news-on-ubuntu-20-04-focal-fossa-linux
- https://github.com/democratic-csi/democratic-csi/issues/151 (some notes on
using delegated zfs permissions)
```bash
####### nfs
yum install -y nfs-utils
systemctl enable --now nfs-server.service
apt-get install -y nfs-kernel-server
systemctl enable --now nfs-kernel-server.service
####### iscsi
yum install targetcli -y
apt-get -y install targetcli-fb
####### smb
apt-get install -y samba smbclient
# create posix user
groupadd -g 1001 smbroot
useradd -u 1001 -g 1001 -M -N -s /sbin/nologin smbroot
passwd smbroot # optional
# create smb user and set password
smbpasswd -L -a smbroot
####### nvmeof
# ensure nvmeof target modules are loaded at startup
cat <<EOF > /etc/modules-load.d/nvmet.conf
nvmet
nvmet-tcp
nvmet-fc
nvmet-rdma
EOF
# load the modules immediately
modprobe nvmet
modprobe nvmet-tcp
modprobe nvmet-fc
modprobe nvmet-rdma
# install nvmetcli and systemd services
git clone git://git.infradead.org/users/hch/nvmetcli.git
cd nvmetcli
## install globally
python3 setup.py install --prefix=/usr
pip install configshell_fb
## install to root home dir
python3 setup.py install --user
pip install configshell_fb --user
# prevent log files from filling up disk
ln -sf /dev/null ~/.nvmetcli/log.txt
ln -sf /dev/null ~/.nvmetcli/history.txt
# install systemd unit and enable/start
## optionally to ensure the config file is loaded before we start
## reading/writing to it add an ExecStartPost= to the unit file
##
## ExecStartPost=/usr/bin/touch /var/run/nvmet-config-loaded
##
## in your driver config set nvmeof.shareStrategyNvmetCli.configIsImportedFilePath=/var/run/nvmet-config-loaded
## which will prevent the driver from making any changes until the configured
## file is present
vi nvmet.service
cp nvmet.service /etc/systemd/system/
mkdir -p /etc/nvmet
systemctl daemon-reload
systemctl enable --now nvmet.service
systemctl status nvmet.service
# create the port(s) configuration manually
echo "
cd /
ls
" | nvmetcli
# do this multiple times altering as appropriate if you have/want multipath
# change the port to 2, 3.. each additional path
# the below example creates a tcp port listening on all IPs on port 4420
echo "
cd /ports
create 1
cd 1
set addr adrfam=ipv4 trtype=tcp traddr=0.0.0.0 trsvcid=4420
saveconfig /etc/nvmet/config.json
" | nvmetcli
# if running TrueNAS SCALE you can skip the above and simply copy
# contrib/scale-nvmet-start.sh to your machine and add it as a startup script
# to launch POSTINIT type COMMAND
# and then create the port(s) as mentioned above
```
### Synology (synology-iscsi)
Ensure iSCSI Manager has been installed and is generally set up/configured. DSM 6.3+ is supported.
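A minimal connection excerpt might look like the following (a sketch with placeholder values only; see the `examples` directory in this repo for complete configs):

```
driver: synology-iscsi
httpConnection:
  protocol: https
  host: dsm.example.com
  port: 5001
  username: admin
  password: <password>
  allowInsecure: true
  session: democratic-csi
  serialize: true
synology:
  # DSM volume to provision LUNs on
  volume: /volume1
```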
### objectivefs (objectivefs)
ObjectiveFS requires the use of an _Admin Key_ to properly automate the
lifecycle of filesystems. Each deployment of the driver will point to a single
`pool` (bucket) and create individual `filesystems` within that bucket
on-demand.
Ensure the config value used for `pool` is an existing bucket. Be sure the
bucket is _NOT_ being used in fs mode (ie: the whole bucket is a single fs).
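As a rough sketch, a driver config might look like the following (placeholder values; the exact set of environment variables depends on your object store, see the links below):

```
driver: objectivefs
objectivefs:
  # must be an existing bucket, used only in pool mode (not as a single fs)
  pool: ofs-filesystem-pool
  cli:
    sudoEnabled: false
    env:
      OBJECTIVEFS_LICENSE: <license>
      OBJECTSTORE: s3://
      ACCESS_KEY: <access key>
      SECRET_KEY: <secret key>
      OBJECTIVEFS_PASSPHRASE: <passphrase>
```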
The `democratic-csi` `node` container will host the fuse mount process so
be careful to only upgrade when all relevant workloads have been drained from
the respective node. Also beware that any cpu/memory limits placed on the
container by the orchestration system will impact any ability to use the
caching, etc. features of objectivefs.
- https://objectivefs.com/howto/csi-driver-objectivefs
- https://objectivefs.com/howto/csi-driver-objectivefs-kubernetes-managed
- https://objectivefs.com/howto/objectivefs-admin-key-setup
- https://objectivefs.com/features#filesystem-pool
- https://objectivefs.com/howto/how-to-create-a-filesystem-with-an-existing-empty-bucket
## Helm Installation ## Helm Installation
```bash ```
helm repo add democratic-csi https://democratic-csi.github.io/charts/ helm repo add democratic-csi https://democratic-csi.github.io/charts/
helm repo update helm repo update
# helm v2 # helm v2
@ -604,16 +170,12 @@ microk8s helm upgrade \
zfs-nfs democratic-csi/democratic-csi zfs-nfs democratic-csi/democratic-csi
``` ```
- microk8s - `/var/snap/microk8s/common/var/lib/kubelet`
- pivotal - `/var/vcap/data/kubelet`
- k0s - `/var/lib/k0s/kubelet`
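For example, with microk8s the kubelet path above is typically passed to helm along with the other values (the `node.kubeletHostPath` value name is assumed here, check the chart version you are deploying):

```bash
helm upgrade \
  --install \
  --create-namespace \
  --values zfs-nfs.yaml \
  --set node.kubeletHostPath=/var/snap/microk8s/common/var/lib/kubelet \
  --namespace democratic-csi \
  zfs-nfs democratic-csi/democratic-csi
```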
### openshift ### openshift
`democratic-csi` generally works fine with openshift. Some special parameters `democratic-csi` generally works fine with openshift. Some special parameters
need to be set with helm (support added in chart version `0.6.1`): need to be set with helm (support added in chart version `0.6.1`):
```bash ```
# for sure required # for sure required
--set node.rbac.openshift.privileged=true --set node.rbac.openshift.privileged=true
--set node.driver.localtimeHostPath=false --set node.driver.localtimeHostPath=false
@ -623,62 +185,46 @@ need to be set with helm (support added in chart version `0.6.1`):
``` ```
### Nomad ### Nomad
`democratic-csi` works with Nomad in a functioning but limited capacity. See the [Nomad docs](docs/nomad.md) for details.
`democratic-csi` works with Nomad in a functioning but limited capacity. See the
[Nomad docs](docs/nomad.md) for details.
### Docker Swarm
- https://github.com/moby/moby/blob/master/docs/cluster_volumes.md
- https://github.com/olljanat/csi-plugins-for-docker-swarm
## Multiple Deployments ## Multiple Deployments
You may install multiple deployments of each/any driver. It requires the You may install multiple deployments of each/any driver. It requires the following:
following:
- Use a new helm release name for each deployment - Use a new helm release name for each deployment
- Make sure you have a unique `csiDriver.name` in the values file (within the - Make sure you have a unique `csiDriver.name` in the values file
same cluster)
- Use unique names for your storage classes (per cluster) - Use unique names for your storage classes (per cluster)
- Use a unique parent dataset (ie: don't try to use the same parent across - Use a unique parent dataset (ie: don't try to use the same parent across deployments or clusters)
deployments or clusters)
- For `iscsi` and `smb` be aware that the names of assets/shares are _global_
and so collisions are possible/probable. Appropriate use of the respective
`nameTemplate`, `namePrefix`, and `nameSuffix` configuration options will
mitigate the issue [#210](https://github.com/democratic-csi/democratic-csi/issues/210); a sample set of values illustrating these points is shown below.
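A rough sketch of what typically differs between two such deployments (helm values excerpts with hypothetical names, assuming the chart layout where the driver config is embedded under `driver.config`):

```
# values-zfs-nfs-a.yaml (hypothetical)
csiDriver:
  name: org.democratic-csi.nfs-a # unique within the cluster
storageClasses:
  - name: zfs-nfs-a # unique per cluster
    defaultClass: false
driver:
  config:
    driver: freenas-nfs
    zfs:
      # never share the parent datasets across deployments or clusters
      datasetParentName: tank/k8s/a/vols
      detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
```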
# Snapshot Support # Snapshot Support
Install snapshot controller (once per cluster): Install beta (v1.17+) CRDs (once per cluster):
- https://github.com/democratic-csi/charts/tree/master/stable/snapshot-controller
OR
- https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd - https://github.com/kubernetes-csi/external-snapshotter/tree/master/client/config/crd
```
kubectl apply -f snapshot.storage.k8s.io_volumesnapshotclasses.yaml
kubectl apply -f snapshot.storage.k8s.io_volumesnapshotcontents.yaml
kubectl apply -f snapshot.storage.k8s.io_volumesnapshots.yaml
```
Install snapshot controller (once per cluster):
- https://github.com/kubernetes-csi/external-snapshotter/tree/master/deploy/kubernetes/snapshot-controller - https://github.com/kubernetes-csi/external-snapshotter/tree/master/deploy/kubernetes/snapshot-controller
```
# replace namespace references to your liking
kubectl apply -f rbac-snapshot-controller.yaml
kubectl apply -f setup-snapshot-controller.yaml
```
Install `democratic-csi` as usual with `volumeSnapshotClasses` defined as appropriate. Install `democratic-csi` as usual with `volumeSnapshotClasses` defined as appropriate.
- https://kubernetes.io/docs/concepts/storage/volume-snapshots/ - https://kubernetes.io/docs/concepts/storage/volume-snapshots/
- https://github.com/kubernetes-csi/external-snapshotter#usage - https://github.com/kubernetes-csi/external-snapshotter#usage
- https://github.com/democratic-csi/democratic-csi/issues/129#issuecomment-961489810
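For reference, a minimal `volumeSnapshotClasses` entry in the helm values might look like the following (a sketch only; `detachedSnapshots` is driver dependent and is discussed in the issue linked above):

```
volumeSnapshotClasses:
  - name: freenas-nfs-csi
    deletionPolicy: Delete
    parameters:
      # "false" uses native zfs snapshots, "true" creates detached (full copy) snapshots
      detachedSnapshots: "false"
```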
# Migrating from freenas-provisioner and freenas-iscsi-provisioner
It is possible to migrate all volumes from the non-csi freenas provisioners
to `democratic-csi`.
Copy the `contrib/freenas-provisioner-to-democratic-csi.sh` script from the
project to your workstation, read the script in detail, and edit the variables
to your needs to start migrating!
# Related # Related
- https://github.com/nmaupu/freenas-provisioner - https://github.com/nmaupu/freenas-provisioner
- https://github.com/travisghansen/freenas-iscsi-provisioner - https://github.com/travisghansen/freenas-iscsi-provisioner
- https://datamattsson.tumblr.com/post/624751011659202560/welcome-truenas-core-container-storage-provider - https://datamattsson.tumblr.com/post/624751011659202560/welcome-truenas-core-container-storage-provider
- https://github.com/dravanet/truenas-csi
- https://github.com/SynologyOpenSource/synology-csi
- https://github.com/openebs/zfs-localpv

View File

@ -1,18 +1,8 @@
#!/usr/bin/env -S node --expose-gc ${NODE_OPTIONS_CSI_1} ${NODE_OPTIONS_CSI_2} ${NODE_OPTIONS_CSI_3} ${NODE_OPTIONS_CSI_4} #!/usr/bin/env -S node --nouse-idle-notification --expose-gc
/**
* keep the shebang line length under 128
* https://github.com/democratic-csi/democratic-csi/issues/171
*/
// polyfills
require("../src/utils/polyfills");
const yaml = require("js-yaml"); const yaml = require("js-yaml");
const fs = require("fs"); const fs = require("fs");
const { grpc } = require("../src/utils/grpc");
const { stringify, stripWindowsDriveLetter } = require("../src/utils/general");
let driverConfigFile;
let options; let options;
const args = require("yargs") const args = require("yargs")
.env("DEMOCRATIC_CSI") .env("DEMOCRATIC_CSI")
@ -22,29 +12,17 @@ const args = require("yargs")
describe: "provide a path to driver config file", describe: "provide a path to driver config file",
config: true, config: true,
configParser: (path) => { configParser: (path) => {
// normalize path for host-process containers try {
// CONTAINER_SANDBOX_MOUNT_POINT C:\C\0eac9a8da76f6d7119c5d9f86c8b3106d67dbbf01dbeb22fdc0192476b7e31cb\ options = JSON.parse(fs.readFileSync(path, "utf-8"));
// path is injected as C:\config\driver-config-file.yaml return true;
if (process.env.CONTAINER_SANDBOX_MOUNT_POINT) { } catch (e) {}
path = `${
process.env.CONTAINER_SANDBOX_MOUNT_POINT
}${stripWindowsDriveLetter(path)}`;
}
try { try {
options = yaml.load(fs.readFileSync(path, "utf8")); options = yaml.safeLoad(fs.readFileSync(path, "utf8"));
try {
driverConfigFile = fs.realpathSync(path);
} catch (e) {
console.log("failed finding config file realpath: " + e.toString());
driverConfigFile = path;
}
return true; return true;
} catch (e) { } catch (e) {}
console.log("failed parsing config file: " + path);
throw e; throw new Error("failed parsing config file: " + path);
}
}, },
}) })
.demandOption(["driver-config-file"], "driver-config-file is required") .demandOption(["driver-config-file"], "driver-config-file is required")
@ -54,20 +32,7 @@ const args = require("yargs")
}) })
.option("csi-version", { .option("csi-version", {
describe: "versin of the csi spec to load", describe: "versin of the csi spec to load",
choices: [ choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0", "1.3.0"],
"0.2.0",
"0.3.0",
"1.0.0",
"1.1.0",
"1.2.0",
"1.3.0",
"1.4.0",
"1.5.0",
"1.6.0",
"1.7.0",
"1.8.0",
"1.9.0",
],
}) })
.demandOption(["csi-version"], "csi-version is required") .demandOption(["csi-version"], "csi-version is required")
.option("csi-name", { .option("csi-name", {
@ -93,11 +58,6 @@ const args = require("yargs")
describe: "listen socket for the server", describe: "listen socket for the server",
type: "string", type: "string",
}) })
.option("server-socket-permissions-mode", {
describe: "permissions on the socket file for the server",
type: "string",
default: "0600", // os default is 755
})
.version() .version()
.help().argv; .help().argv;
@ -106,12 +66,11 @@ if (!args.serverSocket && !args.serverAddress && !args.serverPort) {
process.exit(1); process.exit(1);
} }
//console.log(args);
//console.log(process.env);
const package = require("../package.json"); const package = require("../package.json");
args.version = package.version; args.version = package.version;
//const grpc = require("grpc");
const grpc = require("grpc-uds");
const protoLoader = require("@grpc/proto-loader"); const protoLoader = require("@grpc/proto-loader");
const LRU = require("lru-cache"); const LRU = require("lru-cache");
const cache = new LRU({ max: 500 }); const cache = new LRU({ max: 500 });
@ -122,8 +81,7 @@ const GeneralUtils = require("../src/utils/general");
if (args.logLevel) { if (args.logLevel) {
logger.level = args.logLevel; logger.level = args.logLevel;
} }
const csiVersion = process.env.CSI_VERSION || "1.2.0";
const csiVersion = process.env.CSI_VERSION || args.csiVersion || "1.5.0";
const PROTO_PATH = __dirname + "/../csi_proto/csi-v" + csiVersion + ".proto"; const PROTO_PATH = __dirname + "/../csi_proto/csi-v" + csiVersion + ".proto";
// Suggested options for similarity to existing grpc.load behavior // Suggested options for similarity to existing grpc.load behavior
@ -140,17 +98,14 @@ const csi = protoDescriptor.csi.v1;
logger.info("initializing csi driver: %s", options.driver); logger.info("initializing csi driver: %s", options.driver);
const { Registry } = require("../src/utils/registry");
let globalRegistry = new Registry();
let driver; let driver;
try { try {
driver = require("../src/driver/factory").factory( driver = require("../src/driver/factory").factory(
{ logger, args, cache, package, csiVersion, registry: globalRegistry }, { logger, args, cache, package },
options options
); );
} catch (err) { } catch (err) {
logger.error(`${err.toString()} ${err.stack}`); logger.error(err.toString());
process.exit(1); process.exit(1);
} }
@ -163,21 +118,12 @@ try {
let operationLock = new Set(); let operationLock = new Set();
async function requestHandlerProxy(call, callback, serviceMethodName) { async function requestHandlerProxy(call, callback, serviceMethodName) {
const cleansedCall = JSON.parse(stringify(call)); const cleansedCall = JSON.parse(JSON.stringify(call));
delete cleansedCall.call;
delete cleansedCall.canceled;
for (const key in cleansedCall) {
if (key.startsWith("_")) {
delete cleansedCall[key];
}
}
for (const key in cleansedCall.request) { for (const key in cleansedCall.request) {
if (key.includes("secret")) { if (key.includes("secret")) {
cleansedCall.request[key] = "redacted"; cleansedCall.request[key] = "redacted";
} }
} }
try { try {
logger.info( logger.info(
"new request - driver: %s method: %s call: %j", "new request - driver: %s method: %s call: %j",
@ -201,18 +147,6 @@ async function requestHandlerProxy(call, callback, serviceMethodName) {
}); });
} }
// for testing purposes
//await GeneralUtils.sleep(10000);
//throw new Error("fake error");
// for CI/testing purposes
if (["NodePublishVolume", "NodeStageVolume"].includes(serviceMethodName)) {
await driver.setVolumeContextCache(
call.request.volume_id,
call.request.volume_context
);
}
let response; let response;
let responseError; let responseError;
try { try {
@ -238,31 +172,19 @@ async function requestHandlerProxy(call, callback, serviceMethodName) {
throw responseError; throw responseError;
} }
// for CI/testing purposes
if (serviceMethodName == "CreateVolume") {
await driver.setVolumeContextCache(
response.volume.volume_id,
response.volume.volume_context
);
}
logger.info( logger.info(
"new response - driver: %s method: %s response: %j", "new response - driver: %s method: %s response: %j",
driver.constructor.name, driver.constructor.name,
serviceMethodName, serviceMethodName,
response response
); );
callback(null, response); callback(null, response);
} catch (e) { } catch (e) {
let message; let message;
if (e instanceof Error) { if (e instanceof Error) {
message = e.toString(); message = e.toString();
if (e.stack) {
message += ` ${e.stack}`;
}
} else { } else {
message = stringify(e); message = JSON.stringify(e);
} }
logger.error( logger.error(
@ -317,9 +239,6 @@ function getServer() {
async ValidateVolumeCapabilities(call, callback) { async ValidateVolumeCapabilities(call, callback) {
requestHandlerProxy(call, callback, arguments.callee.name); requestHandlerProxy(call, callback, arguments.callee.name);
}, },
async ControllerGetVolume(call, callback) {
requestHandlerProxy(call, callback, arguments.callee.name);
},
async ListVolumes(call, callback) { async ListVolumes(call, callback) {
requestHandlerProxy(call, callback, arguments.callee.name); requestHandlerProxy(call, callback, arguments.callee.name);
}, },
@ -393,11 +312,9 @@ if (args.serverSocket) {
} }
logger.info( logger.info(
"starting csi server - node version: %s, package version: %s, config file: %s, csi-name: %s, csi-driver: %s, csi-mode: %s, csi-version: %s, address: %s, socket: %s", "starting csi server - name: %s, version: %s, driver: %s, mode: %s, csi version: %s, address: %s, socket: %s",
process.version,
args.version,
driverConfigFile,
args.csiName, args.csiName,
args.version,
options.driver, options.driver,
args.csiMode.join(","), args.csiMode.join(","),
args.csiVersion, args.csiVersion,
@ -405,176 +322,27 @@ logger.info(
bindSocket bindSocket
); );
const signalMapping = { if (bindAddress) {
1: "SIGHUP", csiServer.bind(bindAddress, grpc.ServerCredentials.createInsecure());
2: "SIGINT", }
3: "SIGQUIT",
4: "SIGILL",
5: "SIGTRAP",
6: "SIGABRT",
7: "SIGEMT",
8: "SIGFPE",
9: "SIGKILL",
10: "SIGBUS",
11: "SIGSEGV",
12: "SIGSYS",
13: "SIGPIPE",
14: "SIGALRM",
15: "SIGTERM",
16: "SIGURG",
17: "SIGSTOP",
18: "SIGTSTP",
19: "SIGCONT",
20: "SIGCHLD",
21: "SIGTTIN",
22: "SIGTTOU",
23: "SIGIO",
24: "SIGXCPU",
25: "SIGXFSZ",
26: "SIGVTALRM",
27: "SIGPROF",
28: "SIGWINCH",
29: "SIGINFO",
30: "SIGUSR1",
31: "SIGUSR2",
};
[`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`].forEach( if (bindSocket) {
csiServer.bind(bindSocket, grpc.ServerCredentials.createInsecure());
}
csiServer.start();
[`SIGINT`, `SIGUSR1`, `SIGUSR2`, `uncaughtException`, `SIGTERM`].forEach(
(eventType) => { (eventType) => {
process.on(eventType, async (code) => { process.on(eventType, (code) => {
let codeNumber = null; console.log(`running server shutdown, exit code: ${code}`);
let codeName = null; let socketPath = args.serverSocket || "";
if (code > 0) {
codeNumber = code;
codeName = signalMapping[code];
} else {
codeNumber = Object.keys(signalMapping).find(
(key) => signalMapping[key] === code
);
codeName = code;
}
console.log(
`running server shutdown, exit code: ${codeNumber} (${codeName})`
);
// attempt clean shutdown of in-flight requests
try {
await new Promise((resolve, reject) => {
try {
csiServer.tryShutdown(() => {
resolve();
});
} catch (e) {
reject(e);
}
});
console.log(`grpc server gracefully closed all connections`);
} catch (e) {
console.log("failed to cleanly shutdown grpc server", e);
}
// NOTE: if the shutdown above finishes cleanly the socket will already be removed
let socketPath = bindSocket;
socketPath = socketPath.replace(/^unix:\/\//g, ""); socketPath = socketPath.replace(/^unix:\/\//g, "");
if (socketPath && fs.existsSync(socketPath)) { if (socketPath && fs.existsSync(socketPath)) {
let fsStat = fs.statSync(socketPath); fs.unlinkSync(socketPath);
if (fsStat.isSocket()) {
fs.unlinkSync(socketPath);
console.log(`removed grpc socket ${socketPath}`);
}
} }
console.log("server fully shutdown, exiting"); process.exit(code);
process.exit(codeNumber);
}); });
} }
); );
if (process.env.LOG_MEMORY_USAGE == "1") {
setInterval(() => {
console.log("logging memory usages due to LOG_MEMORY_USAGE env var");
const used = process.memoryUsage();
for (let key in used) {
console.log(
`[${new Date()}] Memory Usage: ${key} ${
Math.round((used[key] / 1024 / 1024) * 100) / 100
} MB`
);
}
}, process.env.LOG_MEMORY_USAGE_INTERVAL || 5000);
}
if (process.env.MANUAL_GC == "1") {
setInterval(() => {
console.log("gc invoked due to MANUAL_GC env var");
try {
if (global.gc) {
global.gc();
}
} catch (e) {}
}, process.env.MANUAL_GC_INTERVAL || 60000);
}
if (process.env.LOG_GRPC_SESSIONS == "1") {
setInterval(() => {
console.log("dumping sessions");
try {
console.log(csiServer.sessions);
} catch (e) {}
}, 5000);
}
if (require.main === module) {
(async function () {
try {
if (bindAddress) {
await new Promise((resolve, reject) => {
csiServer.bindAsync(
bindAddress,
grpc.ServerCredentials.createInsecure(),
(err) => {
if (err) {
reject(err);
return;
}
resolve();
}
);
});
}
if (bindSocket) {
let socketPath = bindSocket;
socketPath = socketPath.replace(/^unix:\/\//g, "");
if (socketPath && fs.existsSync(socketPath)) {
let fsStat = fs.statSync(socketPath);
if (fsStat.isSocket()) {
fs.unlinkSync(socketPath);
}
}
await new Promise((resolve, reject) => {
csiServer.bindAsync(
bindSocket,
grpc.ServerCredentials.createInsecure(),
(err) => {
if (err) {
reject(err);
return;
}
resolve();
}
);
});
fs.chmodSync(socketPath, args["server-socket-permissions-mode"]);
}
csiServer.start();
} catch (e) {
console.log(e);
process.exit(1);
}
})();
}

View File

@ -1,178 +0,0 @@
#!/usr/bin/env -S node --nouse-idle-notification --expose-gc
/**
* The purpose of this script is to prune volumes in the storage system which
* do not have correlating PVs in k8s.
*
* kubectl -n democratic-csi exec -ti <controller pod> --container=csi-driver -- bash
* ./bin/k8s-csi-cleaner
*
* env vars:
* # prevents manual input on a per-volume basis to confirm delete action
* # default is 0
* AUTO_DELETE=1
*
* # outputs to the console which volumes would be cleaned vs not
* # default is 0
* DRY_RUN=1
*
* # endpoint for the csi grpc connection
* # default is unix:///csi-data/csi.sock
* CSI_ENDPOINT="localhost:50051"
*/
const _ = require("lodash");
const k8s = require("@kubernetes/client-node");
const prompt = require("prompt");
prompt.start();
const PROTO_PATH = __dirname + "/../csi_proto/csi-v1.5.0.proto";
//var grpc = require("grpc-uds");
var grpc = require("@grpc/grpc-js");
var protoLoader = require("@grpc/proto-loader");
// Suggested options for similarity to existing grpc.load behavior
var packageDefinition = protoLoader.loadSync(PROTO_PATH, {
keepCase: true,
longs: String,
enums: String,
defaults: true,
oneofs: true,
});
var protoDescriptor = grpc.loadPackageDefinition(packageDefinition);
// The protoDescriptor object has the full package hierarchy
var csi = protoDescriptor.csi.v1;
//console.log(csi);
var connectionEndpoint =
process.env.CSI_ENDPOINT || "unix:///csi-data/csi.sock";
var clientIdentity = new csi.Identity(
connectionEndpoint,
grpc.credentials.createInsecure()
);
var clientController = new csi.Controller(
connectionEndpoint,
grpc.credentials.createInsecure()
);
var clientNode = new csi.Node(
connectionEndpoint,
grpc.credentials.createInsecure()
);
async function executeRPC(service, methodName, options = {}) {
//console.log(service[methodName]);
return new Promise((resolve, reject) => {
const call = service[methodName](options, (error, data) => {
//console.log("%s - error: %j, data: %j", methodName, error, data);
if (error) {
reject(error);
}
resolve(data);
});
});
}
async function runControllerListVolumes(starting_token = "") {
const req = {
//max_entries: 3,
//starting_token: "77e73621-6fbc-4aec-9d6b-fc9ac2fd1a44:2"
starting_token,
};
return executeRPC(clientController, "ListVolumes", req);
}
async function runControllerDeleteVolume(volume_id) {
const req = {
volume_id: volume_id,
};
return executeRPC(clientController, "DeleteVolume", req);
}
async function main() {
// get k8s volumes
let k8sVolumes = await new Promise((resolve, reject) => {
const kc = new k8s.KubeConfig();
kc.loadFromDefault();
const k8sApi = kc.makeApiClient(k8s.CoreV1Api);
// V1PersistentVolumeList
k8sApi.listPersistentVolume().then((res) => {
//console.log(res);
//console.dir(res.body.items, { depth: null }); // `depth: null` ensures unlimited recursion
resolve(res.body.items);
});
});
console.log(`${k8sVolumes.length} k8s PVs discovered`);
// get csi volumes
let res;
let csiVolumes = [];
do {
res = await runControllerListVolumes();
csiVolumes = csiVolumes.concat(res.entries);
} while (res.next_token);
console.log(`${csiVolumes.length} csi volumes discovered`);
//console.log(k8sVolumes);
//console.log(csiVolumes);
for (let csiVolume of csiVolumes) {
let volume_id = csiVolume.volume.volume_id;
let volume_context = JSON.stringify(csiVolume.volume.volume_context) || "Unknown";
//console.log(`processing csi volume ${volume_id}`);
let k8sVolume = k8sVolumes.find((i_k8sVolume) => {
let volume_handle = _.get(i_k8sVolume, "spec.csi.volumeHandle", null);
return volume_handle == volume_id;
});
if (!k8sVolume) {
console.log(`volume ${volume_id} (${volume_context}) is NOT in k8s`);
if (process.env.DRY_RUN == "1") {
continue;
}
let del = false;
if (process.env.AUTO_DELETE == "1") {
del = true;
} else {
res = await prompt.get([
{
name: "delete",
required: true,
type: "boolean",
},
]);
del = res.delete;
}
if (del) {
res = await runControllerDeleteVolume(volume_id);
console.log(`csi volume ${volume_id} deleted`);
} else {
console.log(`skipping delete of csi volume ${volume_id}`);
}
} else {
console.log(`volume ${volume_id} (${volume_context}) is in k8s`);
}
}
console.log("Fin");
}
if (require.main === module) {
(async function () {
try {
await main();
} catch (e) {
console.log(e);
}
})();
}

View File

@ -1,23 +1,16 @@
#!/usr/bin/env -S node --nouse-idle-notification --expose-gc #!/usr/bin/env -S node --nouse-idle-notification --expose-gc
const { grpc } = require("../src/utils/grpc"); const yaml = require("js-yaml");
const fs = require("fs");
let options;
const args = require("yargs") const args = require("yargs")
.env("DEMOCRATIC_CSI_LIVENESS_PROBE") .env("DEMOCRATIC_CSI")
.scriptName("liveness-probe") .scriptName("democratic-csi")
.usage("$0 [options]") .usage("$0 [options]")
.option("csi-version", { .option("csi-version", {
describe: "versin of the csi spec to load", describe: "versin of the csi spec to load",
choices: [ choices: ["0.2.0", "0.3.0", "1.0.0", "1.1.0", "1.2.0"],
"0.2.0",
"0.3.0",
"1.0.0",
"1.1.0",
"1.2.0",
"1.3.0",
"1.4.0",
"1.5.0",
],
}) })
.demandOption(["csi-version"], "csi-version is required") .demandOption(["csi-version"], "csi-version is required")
.option("csi-address", { .option("csi-address", {
@ -31,8 +24,10 @@ const args = require("yargs")
const package = require("../package.json"); const package = require("../package.json");
args.version = package.version; args.version = package.version;
//const grpc = require("grpc");
const grpc = require("grpc-uds");
const protoLoader = require("@grpc/proto-loader"); const protoLoader = require("@grpc/proto-loader");
const csiVersion = process.env.CSI_VERSION || args.csiVersion || "1.5.0"; const csiVersion = process.env.CSI_VERSION || "1.1.0";
const PROTO_PATH = __dirname + "/../csi_proto/csi-v" + csiVersion + ".proto"; const PROTO_PATH = __dirname + "/../csi_proto/csi-v" + csiVersion + ".proto";
// Suggested options for similarity to existing grpc.load behavior // Suggested options for similarity to existing grpc.load behavior
@ -47,24 +42,14 @@ const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
const protoDescriptor = grpc.loadPackageDefinition(packageDefinition); const protoDescriptor = grpc.loadPackageDefinition(packageDefinition);
const csi = protoDescriptor.csi.v1; const csi = protoDescriptor.csi.v1;
let csiAddress = args.csiAddress;
const tcpRegex = /[^\:]+:[0-9]*$/;
if (
!tcpRegex.test(csiAddress) &&
!csiAddress.toLowerCase().startsWith("unix://")
) {
csiAddress = "unix://" + csiAddress;
}
const clientIdentity = new csi.Identity( const clientIdentity = new csi.Identity(
csiAddress, args.csiAddress,
grpc.credentials.createInsecure() grpc.credentials.createInsecure()
); );
/** /**
* Probe the identity service and check for ready state * Probe the identity service and check for ready state
* *
* https://github.com/kubernetes-csi/livenessprobe/blob/master/cmd/livenessprobe/main.go * https://github.com/kubernetes-csi/livenessprobe/blob/master/cmd/livenessprobe/main.go
* https://github.com/kubernetes-csi/csi-lib-utils/blob/master/rpc/common.go * https://github.com/kubernetes-csi/csi-lib-utils/blob/master/rpc/common.go
*/ */

View File

@ -1,19 +0,0 @@
Write-Output "current user"
whoami
Write-Output "current working directory"
(Get-Location).Path
Write-Output "current PATH"
$Env:PATH
Write-Output "node version"
node --version
Write-Output "npm version"
npm --version
# install deps
Write-Output "running npm i"
npm i
Write-Output "creating tar.gz"
# tar node_modules to keep the number of files low to upload
tar -zcf node_modules-windows-amd64.tar.gz node_modules

View File

@ -1,15 +0,0 @@
#!/bin/bash
set -e
set -x
export PATH="/usr/local/lib/nodejs/bin:${PATH}"
node --version
npm --version
# install deps
npm i
# tar node_modules to keep the number of files low to upload
tar -zcf node_modules-linux-amd64.tar.gz node_modules

View File

@ -1,16 +0,0 @@
#Set-StrictMode -Version Latest
#$ErrorActionPreference = "Stop"
#$PSDefaultParameterValues['*:ErrorAction'] = "Stop"
function ThrowOnNativeFailure {
if (-not $?) {
throw 'Native Failure'
}
}
function psenvsubstr($data) {
foreach($v in Get-ChildItem env:) {
$key = '${' + $v.Name + '}'
$data = $data.Replace($key, $v.Value)
}
return $data
}

View File

@ -1,15 +0,0 @@
if (! $PSScriptRoot) {
$PSScriptRoot = $args[0]
}
. "${PSScriptRoot}\helper.ps1"
Set-Location $env:PWD
Write-Output "launching csi-grpc-proxy"
$env:PROXY_TO = "npipe://" + $env:NPIPE_ENDPOINT
$env:BIND_TO = "unix://" + $env:CSI_ENDPOINT
# https://stackoverflow.com/questions/2095088/error-when-calling-3rd-party-executable-from-powershell-when-using-an-ide
csi-grpc-proxy.exe 2>&1 | % { "$_" }

View File

@ -1,69 +0,0 @@
if (! $PSScriptRoot) {
$PSScriptRoot = $args[0]
}
. "${PSScriptRoot}\helper.ps1"
Set-Location $env:PWD
$exit_code = 0
$tmpdir = New-Item -ItemType Directory -Path ([System.IO.Path]::GetTempPath()) -Name ([System.IO.Path]::GetRandomFileName())
$env:CSI_SANITY_TEMP_DIR = $tmpdir.FullName
# cleanse endpoint to something csi-sanity plays nicely with
$endpoint = ${env:CSI_ENDPOINT}
$endpoint = $endpoint.replace("C:\", "/")
$endpoint = $endpoint.replace("\", "/")
if (! $env:CSI_SANITY_FAILFAST) {
$env:CSI_SANITY_FAILFAST = "false"
}
Write-Output "launching csi-sanity"
Write-Output "connecting to: ${endpoint}"
Write-Output "failfast: ${env:CSI_SANITY_FAILFAST}"
Write-Output "skip: ${env:CSI_SANITY_SKIP}"
Write-Output "focus: ${env:CSI_SANITY_FOCUS}"
Write-Output "csi.mountdir: ${env:CSI_SANITY_TEMP_DIR}\mnt"
Write-Output "csi.stagingdir: ${env:CSI_SANITY_TEMP_DIR}\stage"
$exe = "csi-sanity.exe"
$exeargs = @()
$exeargs += "-csi.endpoint", "unix://${endpoint}"
$exeargs += "-csi.mountdir", "${env:CSI_SANITY_TEMP_DIR}\mnt"
$exeargs += "-csi.stagingdir", "${env:CSI_SANITY_TEMP_DIR}\stage"
$exeargs += "-csi.testvolumeexpandsize", "2147483648"
$exeargs += "-csi.testvolumesize", "1073741824"
$exeargs += "--csi.secrets", "${env:CSI_SANITY_SECRETS}"
$exeargs += "-ginkgo.skip", "${env:CSI_SANITY_SKIP}"
$exeargs += "-ginkgo.focus", "${env:CSI_SANITY_FOCUS}"
if ($env:CSI_SANITY_FAILFAST -eq "true") {
$exeargs += "-ginkgo.fail-fast"
}
Write-Output "csi-sanity command: $exe $($exeargs -join ' ')"
&$exe $exeargs
if (-not $?) {
$exit_code = $LASTEXITCODE
Write-Output "csi-sanity exit code: ${exit_code}"
if ($exit_code -gt 0) {
$exit_code = 1
}
}
# remove tmp dir
Remove-Item -Path "$env:CSI_SANITY_TEMP_DIR" -Force -Recurse
#Exit $exit_code
Write-Output "exiting with exit code: ${exit_code}"
if ($exit_code -gt 0) {
throw "csi-sanity failed"
}
# these do not work for whatever reason
#Exit $exit_code
#[System.Environment]::Exit($exit_code)

View File

@ -1,43 +0,0 @@
#!/bin/bash
set -e
set -x
: ${CI_BUILD_KEY:="local"}
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
: ${CSI_SANITY_TEMP_DIR:=$(mktemp -d -t ci-csi-sanity-tmp-XXXXXXXX)}
if [[ ! -S "${CSI_ENDPOINT}" ]]; then
echo "csi socket: ${CSI_ENDPOINT} does not exist"
exit 1
fi
trap ctrl_c INT
function ctrl_c() {
echo "Trapped CTRL-C"
exit 1
}
chmod g+w,o+w "${CSI_ENDPOINT}"
mkdir -p "${CSI_SANITY_TEMP_DIR}"
rm -rf "${CSI_SANITY_TEMP_DIR}"/*
chmod -R 777 "${CSI_SANITY_TEMP_DIR}"
# https://github.com/kubernetes-csi/csi-test/tree/master/cmd/csi-sanity
# FOR DEBUG: --ginkgo.v
# --csi.secrets=<path to secrets file>
#
# expand size 2073741824 to have mis-alignments
# expand size 2147483648 to have everything line up nicely
csi-sanity --csi.endpoint "unix://${CSI_ENDPOINT}" \
--csi.mountdir "${CSI_SANITY_TEMP_DIR}/mnt" \
--csi.stagingdir "${CSI_SANITY_TEMP_DIR}/stage" \
--csi.testvolumeexpandsize 2147483648 \
--csi.testvolumesize 1073741824 \
--csi.secrets="${CSI_SANITY_SECRETS}" \
-ginkgo.skip "${CSI_SANITY_SKIP}" \
-ginkgo.focus "${CSI_SANITY_FOCUS}"
rm -rf "${CSI_SANITY_TEMP_DIR}"

View File

@ -1,29 +0,0 @@
if (! $PSScriptRoot) {
$PSScriptRoot = $args[0]
}
. "${PSScriptRoot}\helper.ps1"
Set-Location $env:PWD
Write-Output "launching server"
$env:LOG_LEVEL = "debug"
$env:CSI_VERSION = "1.9.0"
$env:CSI_NAME = "driver-test"
$env:CSI_SANITY = "1"
if (! ${env:CONFIG_FILE}) {
$env:CONFIG_FILE = $env:TEMP + "\csi-config-" + $env:CI_BUILD_KEY + ".yaml"
if ($env:TEMPLATE_CONFIG_FILE) {
$config_data = Get-Content "${env:TEMPLATE_CONFIG_FILE}" -Raw
$config_data = psenvsubstr($config_data)
$config_data | Set-Content "${env:CONFIG_FILE}"
}
}
node "${PSScriptRoot}\..\..\bin\democratic-csi" `
--log-level "$env:LOG_LEVEL" `
--driver-config-file "$env:CONFIG_FILE" `
--csi-version "$env:CSI_VERSION" `
--csi-name "$env:CSI_NAME" `
--server-socket "${env:NPIPE_ENDPOINT}" 2>&1 | % { "$_" }

View File

@ -1,29 +0,0 @@
#!/bin/bash
set -e
set -x
export PATH="/usr/local/lib/nodejs/bin:${PATH}"
echo "current launch-server PATH: ${PATH}"
: ${CI_BUILD_KEY:="local"}
: ${TEMPLATE_CONFIG_FILE:=${1}}
: ${CSI_MODE:=""}
: ${CSI_VERSION:="1.9.0"}
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
: ${LOG_PATH:=/tmp/csi-${CI_BUILD_KEY}.log}
if [[ "x${CONFIG_FILE}" == "x" ]]; then
: ${CONFIG_FILE:=/tmp/csi-config-${CI_BUILD_KEY}.yaml}
if [[ "x${TEMPLATE_CONFIG_FILE}" != "x" ]]; then
envsubst <"${TEMPLATE_CONFIG_FILE}" >"${CONFIG_FILE}"
fi
fi
if [[ "x${CSI_MODE}" != "x" ]]; then
EXTRA_ARGS="--csi-mode ${CSI_MODE} ${EXTRA_ARGS}"
fi
# > "${LOG_PATH}" 2>&1
exec ./bin/democratic-csi --log-level debug --driver-config-file "${CONFIG_FILE}" --csi-version "${CSI_VERSION}" --csi-name "driver-test" --server-socket "${CSI_ENDPOINT}" ${EXTRA_ARGS}

View File

@ -1,133 +0,0 @@
# https://stackoverflow.com/questions/2095088/error-when-calling-3rd-party-executable-from-powershell-when-using-an-ide
#
# Examples:
#
# $mypath = $MyInvocation.MyCommand.Path
# Get-ChildItem env:\
# Get-Job | Where-Object -Property State -eq “Running”
# Get-Location (like pwd)
# if ($null -eq $env:FOO) { $env:FOO = 'bar' }
. "${PSScriptRoot}\helper.ps1"
#Set-PSDebug -Trace 2
Write-Output "current user"
whoami
Write-Output "current working directory"
(Get-Location).Path
Write-Output "current PATH"
$Env:PATH
function Job-Cleanup() {
Get-Job | Stop-Job
Get-Job | Remove-Job
}
# start clean
Job-Cleanup
# install from artifacts
if ((Test-Path "node_modules-windows-amd64.tar.gz") -and !(Test-Path "node_modules")) {
Write-Output "extracting node_modules-windows-amd64.tar.gz"
tar -zxf node_modules-windows-amd64.tar.gz
}
# setup env
$env:PWD = (Get-Location).Path
$env:CI_BUILD_KEY = ([guid]::NewGuid() -Split "-")[0]
$env:CSI_ENDPOINT = $env:TEMP + "\csi-sanity-" + $env:CI_BUILD_KEY + ".sock"
$env:NPIPE_ENDPOINT = "//./pipe/csi-sanity-" + $env:CI_BUILD_KEY + "csi.sock"
# testing values
if (Test-Path "${PSScriptRoot}\run-dev.ps1") {
. "${PSScriptRoot}\run-dev.ps1"
}
# launch server
$server_job = Start-Job -FilePath .\ci\bin\launch-server.ps1 -InitializationScript {} -ArgumentList $PSScriptRoot
# launch csi-grpc-proxy
$csi_grpc_proxy_job = Start-Job -FilePath .\ci\bin\launch-csi-grpc-proxy.ps1 -InitializationScript {} -ArgumentList $PSScriptRoot
# wait for socket to appear
$iter = 0
$max_iter = 60
$started = 1
while (!(Test-Path "${env:CSI_ENDPOINT}")) {
$iter++
Write-Output "Waiting for ${env:CSI_ENDPOINT} to appear"
Start-Sleep 1
try {
Get-Job | Receive-Job
} catch {}
if ($iter -gt $max_iter) {
Write-Output "${env:CSI_ENDPOINT} failed to appear"
$started = 0
break
}
}
# launch csi-sanity
if ($started -eq 1) {
$csi_sanity_job = Start-Job -FilePath .\ci\bin\launch-csi-sanity.ps1 -InitializationScript {} -ArgumentList $PSScriptRoot
}
# https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/get-job?view=powershell-7.2
# -ChildJobState
$iter = 0
while ($csi_sanity_job -and ($csi_sanity_job.State -eq "Running" -or $csi_sanity_job.State -eq "NotStarted")) {
$iter++
foreach ($job in Get-Job) {
if (($job -eq $csi_grpc_proxy_job) -and ($iter -gt 20)) {
continue
}
if (!$job.HasMoreData) {
continue
}
try {
$job | Receive-Job
}
catch {
if ($job.State -ne "Failed") {
Write-Output "failure receiving job data: ${_}"
# just swallow the errors as it seems there are various reasons errors
# may show up (perhaps no data currently, etc)
#$job | fl
#throw $_
}
}
}
}
# spew any remaining job output to the console
foreach ($job in Get-Job) {
if ($job -eq $csi_grpc_proxy_job) {
continue
}
try {
$job | Receive-Job
}
catch {}
}
# wait for good measure
if ($csi_sanity_job) {
Wait-Job -Job $csi_sanity_job
}
#Get-Job | fl
$exit_code = 0
if (! $csi_sanity_job) {
$exit_code = 1
}
if ($csi_sanity_job -and $csi_sanity_job.State -eq "Failed") {
$exit_code = 1
}
# cleanup after ourselves
Job-Cleanup
Exit $exit_code

View File

@ -1,46 +0,0 @@
#!/bin/bash
set -e
set -x
_term() {
# no idea why this does not work
#[[ -n "${SUDO_PID}" ]] && sudo kill -15 "${SUDO_PID}"
[[ -n "${SUDO_PID}" ]] && sudo kill -15 $(pgrep -P "${SUDO_PID}") || true
}
trap _term EXIT
export PATH="/usr/local/lib/nodejs/bin:${PATH}"
# install deps
#npm i
# install from artifacts
if [[ -f "node_modules-linux-amd64.tar.gz" && ! -d "node_modules" ]];then
tar -zxf node_modules-linux-amd64.tar.gz
fi
# generate key for paths etc
export CI_BUILD_KEY=$(uuidgen | cut -d "-" -f 1)
# launch the server
sudo -E ci/bin/launch-server.sh &
SUDO_PID=$!
# wait for server to launch
#sleep 10
: ${CSI_ENDPOINT:=/tmp/csi-${CI_BUILD_KEY}.sock}
iter=0
max_iter=60
while [ ! -S "${CSI_ENDPOINT}" ];do
((++iter))
echo "waiting for ${CSI_ENDPOINT} to appear"
sleep 1
if [[ $iter -gt $max_iter ]];then
echo "${CSI_ENDPOINT} failed to appear"
exit 1
fi
done
# launch csi-sanity
sudo -E ci/bin/launch-csi-sanity.sh

View File

@ -1,10 +0,0 @@
driver: nfs-client
instance_id:
nfs:
shareHost: ${SERVER_HOST}
shareBasePath: "/mnt/tank/client/nfs/${CI_BUILD_KEY}"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/mnt/client/nfs/${CI_BUILD_KEY}"
dirPermissionsMode: "0777"
dirPermissionsUser: 0
dirPermissionsGroup: 0

View File

@ -1,14 +0,0 @@
driver: smb-client
instance_id:
smb:
shareHost: ${SERVER_HOST}
shareBasePath: "${SHARE_NAME}/${CI_BUILD_KEY}"
# shareHost:shareBasePath should be mounted at this location in the controller container
controllerBasePath: "/mnt/client/smb/${CI_BUILD_KEY}"
dirPermissionsMode: "0777"
dirPermissionsUser: 0
dirPermissionsGroup: 0
node:
mount:
mount_flags: "username=smbroot,password=smbroot"

View File

@ -1,8 +0,0 @@
driver: local-hostpath
instance_id:
local-hostpath:
shareBasePath: "/tmp/local-hostpath/${CI_BUILD_KEY}/controller"
controllerBasePath: "/tmp/local-hostpath/${CI_BUILD_KEY}/controller"
dirPermissionsMode: "0777"
dirPermissionsUser: root
dirPermissionsGroup: root

View File

@ -1,20 +0,0 @@
driver: objectivefs
objectivefs:
pool: ${OBJECTIVEFS_POOL}
cli:
sudoEnabled: false
env:
OBJECTIVEFS_LICENSE: ${OBJECTIVEFS_LICENSE}
OBJECTSTORE: ${OBJECTIVEFS_OBJECTSTORE}
ENDPOINT: ${OBJECTIVEFS_ENDPOINT_PROTOCOL}://${OBJECTIVEFS_ENDPOINT_HOST}:${OBJECTIVEFS_ENDPOINT_PORT}
SECRET_KEY: ${OBJECTIVEFS_SECRET_KEY}
ACCESS_KEY: ${OBJECTIVEFS_ACCESS_KEY}
OBJECTIVEFS_PASSPHRASE: ${OBJECTIVEFS_PASSPHRASE}
_private:
csi:
volume:
idHash:
# max volume name length is 63
strategy: crc32

View File

@ -1,77 +0,0 @@
driver: synology-iscsi
httpConnection:
protocol: http
host: ${SYNOLOGY_HOST}
port: ${SYNOLOGY_PORT}
username: ${SYNOLOGY_USERNAME}
password: ${SYNOLOGY_PASSWORD}
allowInsecure: true
session: "democratic-csi-${CI_BUILD_KEY}"
serialize: true
synology:
volume: ${SYNOLOGY_VOLUME}
iscsi:
targetPortal: ${SYNOLOGY_HOST}
targetPortals: []
baseiqn: "iqn.2000-01.com.synology:XpenoDsm62x."
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
lunTemplate:
# btrfs thin provisioning
type: "BLUN"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
# 3pc = Hardware-assisted data transfer
# tpu = Space reclamation
# can_snapshot = Snapshot
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
#- dev_attrib: emulate_3pc
# enable: 1
#- dev_attrib: emulate_tpu
# enable: 0
#- dev_attrib: can_snapshot
# enable: 1
# btrfs thick provisioning
# only zeroing and locking supported
#type: "BLUN_THICK"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
# ext4 thin provisioning UI sends everything with enabled=0
#type: "THIN"
# ext4 thin with advanced legacy features set
# can only alter tpu (all others are set as enabled=1)
#type: "ADV"
#dev_attribs:
#- dev_attrib: emulate_tpu
# enable: 1
# ext4 thick
# can only alter caw
#type: "FILE"
#dev_attribs:
#- dev_attrib: emulate_caw
# enable: 1
lunSnapshotTemplate:
is_locked: true
# https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot
is_app_consistent: true
targetTemplate:
auth_type: 0
max_sessions: 0

View File

@ -1,77 +0,0 @@
driver: synology-iscsi
httpConnection:
protocol: http
host: ${SYNOLOGY_HOST}
port: ${SYNOLOGY_PORT}
username: ${SYNOLOGY_USERNAME}
password: ${SYNOLOGY_PASSWORD}
allowInsecure: true
session: "democratic-csi-${CI_BUILD_KEY}"
serialize: true
synology:
volume: ${SYNOLOGY_VOLUME}
iscsi:
targetPortal: ${SYNOLOGY_HOST}
targetPortals: []
baseiqn: "iqn.2000-01.com.synology:XpenoDsm62x."
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
lunTemplate:
# btrfs thin provisioning
type: "BLUN"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
# 3pc = Hardware-assisted data transfer
# tpu = Space reclamation
# can_snapshot = Snapshot
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
#- dev_attrib: emulate_3pc
# enable: 1
#- dev_attrib: emulate_tpu
# enable: 0
#- dev_attrib: can_snapshot
# enable: 1
# btrfs thick provisioning
# only zeroing and locking supported
#type: "BLUN_THICK"
# tpws = Hardware-assisted zeroing
# caw = Hardware-assisted locking
#dev_attribs:
#- dev_attrib: emulate_tpws
# enable: 1
#- dev_attrib: emulate_caw
# enable: 1
# ext4 thin provisioning UI sends everything with enabled=0
#type: "THIN"
# ext4 thin with advanced legacy features set
# can only alter tpu (all others are set as enabled=1)
#type: "ADV"
#dev_attribs:
#- dev_attrib: emulate_tpu
# enable: 1
# ext4 thick
# can only alter caw
#type: "FILE"
#dev_attribs:
#- dev_attrib: emulate_caw
# enable: 1
lunSnapshotTemplate:
is_locked: true
# https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot
is_app_consistent: true
targetTemplate:
auth_type: 0
max_sessions: 0

View File

@ -1,44 +0,0 @@
driver: freenas-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
sshConnection:
host: ${TRUENAS_HOST}
port: 22
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# overcome the 63 char limit for testing purposes only
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,35 +0,0 @@
driver: freenas-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
sshConnection:
host: ${TRUENAS_HOST}
port: 22
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: wheel
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,68 +0,0 @@
driver: freenas-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
sshConnection:
host: ${TRUENAS_HOST}
port: 22
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetProperties:
# smb options
aclmode: restricted
aclinherit: passthrough
acltype: nfsv4
casesensitivity: insensitive
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
datasetPermissionsAcls:
- "-m g:builtin_users:full_set:fd:allow"
- "-m group@:modify_set:fd:allow"
- "-m owner@:full_set:fd:allow"
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,37 +0,0 @@
driver: freenas-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
sshConnection:
host: ${TRUENAS_HOST}
port: 22
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0

View File

@ -1,35 +0,0 @@
driver: freenas-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
sshConnection:
host: ${TRUENAS_HOST}
port: 22
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: wheel
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,68 +0,0 @@
driver: freenas-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
sshConnection:
host: ${TRUENAS_HOST}
port: 22
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetProperties:
# smb options
aclmode: restricted
aclinherit: passthrough
acltype: nfsv4
casesensitivity: insensitive
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
datasetPermissionsAcls:
- "-m g:builtin_users:full_set:fd:allow"
- "-m group@:modify_set:fd:allow"
- "-m owner@:full_set:fd:allow"
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,31 +0,0 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0

View File

@ -1,29 +0,0 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,50 +0,0 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,38 +0,0 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,29 +0,0 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,50 +0,0 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,38 +0,0 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,29 +0,0 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,50 +0,0 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,38 +0,0 @@
driver: freenas-api-iscsi
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${TRUENAS_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
targetGroups:
- targetGroupPortalGroup: 1
targetGroupInitiatorGroup: 1
targetGroupAuthType: None
targetGroupAuthGroup:
# 0-100 (0 == ignore)
extentAvailThreshold: 0
# https://github.com/SCST-project/scst/blob/master/scst/src/dev_handlers/scst_vdisk.c#L203
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,29 +0,0 @@
driver: freenas-api-nfs
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${TRUENAS_HOST}
shareAlldirs: false
shareAllowedHosts: []
shareAllowedNetworks: []
shareMaprootUser: root
shareMaprootGroup: root
shareMapallUser: ""
shareMapallGroup: ""

View File

@ -1,50 +0,0 @@
driver: freenas-api-smb
httpConnection:
protocol: http
host: ${TRUENAS_HOST}
port: 80
#apiKey:
username: ${TRUENAS_USERNAME}
password: ${TRUENAS_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: 1001
datasetPermissionsGroup: 1001
smb:
shareHost: ${TRUENAS_HOST}
#nameTemplate: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareAuxiliaryConfigurationTemplate: |
#guest ok = yes
#guest only = yes
shareHome: false
shareAllowedHosts: []
shareDeniedHosts: []
#shareDefaultPermissions: true
shareGuestOk: false
#shareGuestOnly: true
#shareShowHiddenFiles: true
shareRecycleBin: false
shareBrowsable: false
shareAccessBasedEnumeration: true
shareTimeMachine: false
#shareStorageTask:
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,31 +0,0 @@
driver: zfs-generic-iscsi
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${SERVER_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}"
nameSuffix: ""
shareStrategy: "targetCli"
shareStrategyTargetCli:
basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664"
tpg:
attributes:
authentication: 0
generate_node_acls: 1
cache_dynamic_acls: 1
demo_mode_write_protect: 0
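
To sanity-check the targetCli share strategy on ${SERVER_HOST}, the provisioned targets can be listed with stock targetcli commands; the sketch below uses a hypothetical volume name, since actual names depend on namePrefix and the CSI volume id.

# list all iSCSI targets created under the configured basename
targetcli ls /iscsi
# drill into one target's portal/ACL/attribute settings (target name is hypothetical)
targetcli ls /iscsi/iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664:csi-ci-example-pvc-1234/tpg1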

View File

@ -1,40 +0,0 @@
driver: zfs-generic-smb
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetProperties:
#aclmode: restricted
#aclinherit: passthrough
#acltype: nfsv4
casesensitivity: insensitive
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: smbroot
datasetPermissionsGroup: smbroot
smb:
shareHost: ${SERVER_HOST}
shareStrategy: "setDatasetProperties"
shareStrategySetDatasetProperties:
properties:
sharesmb: "on"
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,31 +0,0 @@
driver: zfs-generic-iscsi
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
iscsi:
targetPortal: ${SERVER_HOST}
interface: ""
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareStrategy: "targetCli"
shareStrategyTargetCli:
basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664"
tpg:
attributes:
authentication: 0
generate_node_acls: 1
cache_dynamic_acls: 1
demo_mode_write_protect: 0

View File

@ -1,25 +0,0 @@
driver: zfs-generic-nfs
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0
nfs:
shareHost: ${SERVER_HOST}
shareStrategy: "setDatasetProperties"
shareStrategySetDatasetProperties:
properties:
#sharenfs: "on"
sharenfs: "rw,no_subtree_check,no_root_squash"

View File

@ -1,30 +0,0 @@
driver: zfs-generic-nvmeof
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
nvmeof:
transports:
- "tcp://${SERVER_HOST}:4420"
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
nameSuffix: ""
shareStrategy: "nvmetCli"
shareStrategyNvmetCli:
basename: "nqn.2003-01.org.linux-nvmeof.ubuntu-19.x8664"
ports:
- "1"
subsystem:
attributes:
allow_any_host: 1
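
With shareStrategy nvmetCli the resulting state lives in the kernel's nvmet configfs tree, so it can be checked on the server with either nvmetcli or plain ls; the paths below are the stock kernel locations, and actual subsystem names depend on the namePrefix above.

# dump the live nvmet configuration
nvmetcli ls
# or inspect configfs directly
ls /sys/kernel/config/nvmet/subsystems/
ls /sys/kernel/config/nvmet/ports/1/subsystems/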

View File

@ -1,40 +0,0 @@
driver: zfs-generic-smb
sshConnection:
host: ${SERVER_HOST}
port: 22
username: ${SERVER_USERNAME}
password: ${SERVER_PASSWORD}
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetProperties:
#aclmode: restricted
#aclinherit: passthrough
#acltype: nfsv4
casesensitivity: insensitive
datasetEnableQuotas: true
datasetEnableReservation: false
datasetPermissionsMode: "0770"
datasetPermissionsUser: smbroot
datasetPermissionsGroup: smbroot
smb:
shareHost: ${SERVER_HOST}
shareStrategy: "setDatasetProperties"
shareStrategySetDatasetProperties:
properties:
sharesmb: "on"
node:
mount:
mount_flags: "username=smbroot,password=smbroot"
_private:
csi:
volume:
idHash:
strategy: crc16

View File

@ -1,10 +0,0 @@
driver: zfs-local-dataset
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
datasetEnableQuotas: true
datasetPermissionsMode: "0777"
datasetPermissionsUser: 0
datasetPermissionsGroup: 0

View File

@ -1,10 +0,0 @@
driver: zfs-local-zvol
zfs:
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
zvolCompression:
zvolDedup:
zvolEnableReservation: false
zvolBlocksize:
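
The CI configs above are consumed by the driver at startup. As a rough sketch of a local run (flag names follow the project's usual CLI; the config file name and the envsubst step for expanding the ${...} placeholders are assumptions):

# hypothetical local run against the zfs-local-zvol config above
export CI_BUILD_KEY=local-test
envsubst < zfs-local-zvol.yaml > /tmp/driver-config.yaml
./bin/democratic-csi \
  --driver-config-file /tmp/driver-config.yaml \
  --csi-version 1.5.0 \
  --csi-name org.democratic-csi.test \
  --server-socket /tmp/csi.sock \
  --log-level debug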

0
contrib/ctld-config-watchdog-db.sh Executable file → Normal file
View File

0
contrib/ctld-config-watchdog.sh Executable file → Normal file
View File

0
contrib/ctld-service-watchdog.sh Executable file → Normal file
View File

View File

@ -1,405 +0,0 @@
#!/bin/bash
set -e
#set -x
######## LINKS ########
# https://www.truenas.com/community/threads/moving-zfs-dataset-into-an-other.17720/
# https://www.truenas.com/community/threads/moving-a-zvol.76574/
# https://github.com/kubernetes/kubernetes/issues/77086
######## REQUIREMENTS #########
# kubectl
# curl
# jq
######### NOTES ############
# This script is meant to be downloaded and modified to your specific needs.
# The process is relatively intricate and the matrix of various options is
# quite large.
#
# It is highly recommended to create a test PV/PVC with the old provisioner to
# use as a playground to ensure you have things configured correctly and that
# the transition is smooth.
#
# From a high level the intent of this script is to:
# - update *existing* PVs/PVCs created with the non-csi provisioners to be managed by democratic-csi
# - ultimately after all PVs/PVCs have been migrated remove the non-csi provisioners from your cluster(s)
#
# To achieve the above goals the following happens:
# - each execution of the script is meant to migrate 1 PV/PVC to democratic-csi
# - original PV/PVC API object data is stored in ${PWD}/tmp/*.json
# - PVC api object is deleted and recreated with proper values
# - PV api object is deleted and recreated with proper values
# - you will be required to `zfs rename` your old zfs zvols/datasets to place them in the new democratic-csi structure
# - you will run several `curl` commands to update various share assets in TrueNAS via the API
# - you will run several `zfs` commands to set zfs properties on the datasets
# - you will be required to *stop* individual workloads using the volumes for a short period of time while migrating each PV/PVC
# - you should incur no data loss; after migration workloads should come up in exactly the same state they were in before migration
#
# Several assumptions are made in this script
# - your intent is to use the *same* pool you used previously
# - you have already created/deployed democratic-csi
# - you have already deployed freenas-{nfs,iscsi}-provisioner(s)
# - you have direct access to the storage NAS to run zfs commands
# - you can execute curl commands to manipulate shares/etc with the freenas api
# - where you execute the script should be set up with administrative kubectl/KUBECONFIG access
# currently only the v2 API is supported, so this is unused
# API_VERSION=2
FREENAS_USERNAME="root"
FREENAS_PASSWORD="secret"
FREENAS_URI="http://<ip>:port"
# where your pools get mounted
POOL_MNT_DIR="/mnt"
## nfs
#SHARE_STRATEGY="nfs" # hard-coded if migrating nfs-based volumes
#OLD_STORAGE_CLASS_NAME="freenas-nfs"
#NEW_STORAGE_CLASS_NAME="zfs-nfs"
#NEW_PARENT_DATASET="" # datasetParentName option from democratic-csi config
#PROVISIONER_IDENTITY="" # find this by looking at a PV created by the new storage class .spec.csi.volumeAttributes."storage.kubernetes.io/csiProvisionerIdentity"
#PROVISIONER_DRIVER="freenas-nfs"
#PROVISIONER_INSTANCE_ID="" # optional, should match the driver.instance_id attribute in your democratic-csi config
# should be the mountpoint, not the zfs path to the dataset
# zfs create tank/tmpnfs
# zfs destroy -r tank/tmpnfs
#TMP_ASSET="/mnt/tank/tmpnfs"
## end nfs
## iscsi
#SHARE_STRATEGY="iscsi" # hard-coded if migrating iscsi-based volumes
#OLD_STORAGE_CLASS_NAME="freenas-iscsi"
#NEW_STORAGE_CLASS_NAME="zfs-iscsi"
#NEW_PARENT_DATASET="" # datasetParentName option from democratic-csi config
#PROVISIONER_IDENTITY="" # find this by looking at a PV created by the new storage class .spec.csi.volumeAttributes."storage.kubernetes.io/csiProvisionerIdentity"
#PROVISIONER_DRIVER="freenas-iscsi"
#PROVISIONER_INSTANCE_ID="" # optional, should match the driver.instance_id attribute in your democratic-csi config
# should be the path to the zfs asset *not* a mountpath
# zfs create -V 1MB tank/tmpiscsi
# zfs destroy -r tank/tmpiscsi
#TMP_ASSET="tank/tmpiscsi"
# should match your iscsi.namePrefix/nameSuffix/template syntax in the democratic-csi config
# %s is replaced by the pvc-<id> string
#ISCSI_ASSET_NAME_TEMPLATE="csi.%s.primary"
## end iscsi
###### make sure you uncomment the appropriate variables above in either the nfs or
###### iscsi block (just pick one of the blocks at a time), everything below
###### here is script logic and should not need to be tampered with unless
###### special circumstances/configuration require it
# get secret details
nscJSON=$(kubectl get sc "${NEW_STORAGE_CLASS_NAME}" -o json)
CONTROLLER_NAMESPACE=$(echo "${nscJSON}" | jq -crM '.parameters."csi.storage.k8s.io/controller-expand-secret-namespace"')
CE_SECRET_NAME=$(echo "${nscJSON}" | jq -crM '.parameters."csi.storage.k8s.io/controller-expand-secret-name"')
CP_SECRET_NAME=$(echo "${nscJSON}" | jq -crM '.parameters."csi.storage.k8s.io/controller-publish-secret-name"')
NP_SECRET_NAME=$(echo "${nscJSON}" | jq -crM '.parameters."csi.storage.k8s.io/node-publish-secret-name"')
NS_SECRET_NAME=$(echo "${nscJSON}" | jq -crM '.parameters."csi.storage.k8s.io/node-stage-secret-name"')
NEW_CSI_DRIVER_NAME=$(echo "${nscJSON}" | jq -crM ".provisioner")
function yes_or_no {
while true; do
read -p "$* [y/n]: " yn
case $yn in
[Yy]*) return 0 ;;
[Nn]*)
echo "Aborted"
return 1
;;
esac
done
}
for ipv in $(kubectl get pv -o json | jq -crM ".items[] | select(.spec.storageClassName|test(\"${OLD_STORAGE_CLASS_NAME}\")) | (.metadata.name,.spec.claimRef.namespace,.spec.claimRef.name)"); do
:
echo "${ipv}"
done
read -p "Which PV would you like to migrate? " TO_UPDATE
export pv="${TO_UPDATE}"
echo "migrating ${pv} to new provisioner"
# create temporary directory to store all original PV and PVC json
mkdir -p tmp
if [[ ! -f "tmp/${pv}-pv.json" ]]; then
pvJSON=$(kubectl get pv "${pv}" -o json)
echo "${pvJSON}" >>"tmp/${pv}-pv.json"
else
pvJSON=$(cat "tmp/${pv}-pv.json")
fi
npvJSON="${pvJSON}"
name=$(echo "${pvJSON}" | jq -crM ".metadata.name")
status=$(echo "${pvJSON}" | jq -crM ".status.phase")
reclaimPolicy=$(echo "${pvJSON}" | jq -crM ".spec.persistentVolumeReclaimPolicy")
if [[ ${SHARE_STRATEGY} == "nfs" ]]; then
:
pool=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.pool")
dataset=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.dataset")
shareId=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.shareId")
server=$(echo "${pvJSON}" | jq -crM ".spec.nfs.server")
path=$(echo "${pvJSON}" | jq -crM ".spec.nfs.path")
fsType="nfs"
npath="${POOL_MNT_DIR}/${NEW_PARENT_DATASET}/${pv}"
# only need to remove these from the new json
for annotation in shareId dataset datasetEnableQuotas datasetEnableReservation datasetParent datasetPreExisted freenasNFSProvisionerIdentity pool sharePreExisted; do
:
echo "removing annotation: ${annotation}"
npvJSON=$(echo "${npvJSON}" | jq "del(.metadata.annotations.${annotation})")
done
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.server = \"${server}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.share = \"${npath}\"")
src="${dataset}"
fi
if [[ ${SHARE_STRATEGY} == "iscsi" ]]; then
:
pool=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.pool")
zvol=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.zvol")
targetId=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.targetId")
extentId=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.extentId")
targetGroupId=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.targetGroupId")
targetToExtentId=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.targetToExtentId")
zvol=$(echo "${pvJSON}" | jq -crM ".metadata.annotations.zvol")
fsType=$(echo "${pvJSON}" | jq -crM ".spec.iscsi.fsType")
lun=$(echo "${pvJSON}" | jq -crM ".spec.iscsi.lun")
iqn=$(echo "${pvJSON}" | jq -crM ".spec.iscsi.iqn")
targetPortal=$(echo "${pvJSON}" | jq -crM ".spec.iscsi.targetPortal")
# only need to remove these from the new json
for annotation in datasetParent extentId freenasISCSIProvisionerIdentity iscsiName pool targetGroupId targetId targetToExtentId zvol; do
:
echo "removing annotation: ${annotation}"
npvJSON=$(echo "${npvJSON}" | jq "del(.metadata.annotations.${annotation})")
done
ISCSI_BASE_NAME="$(echo "${iqn}" | cut -d ":" -f1)"
ISCSI_ASSET_NAME=$(printf "${ISCSI_ASSET_NAME_TEMPLATE}" "${pv}")
niqn="${ISCSI_BASE_NAME}:${ISCSI_ASSET_NAME}"
npvJSON=$(echo "${npvJSON}" | jq '.spec.csi.volumeAttributes.interface = ""')
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.iqn = \"${niqn}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.lun = \"${lun}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.portal = \"${targetPortal}\"")
npvJSON=$(echo "${npvJSON}" | jq '.spec.csi.volumeAttributes.portals = ""')
src="${pool}/${zvol}"
fi
dst="${NEW_PARENT_DATASET}/${name}"
npvJSON=$(echo "${npvJSON}" | jq ".metadata.annotations.\"pv.kubernetes.io/provisioned-by\" = \"${NEW_CSI_DRIVER_NAME}\"")
# remove old, update old
npvJSON=$(echo "${npvJSON}" | jq "del(.metadata.resourceVersion)")
npvJSON=$(echo "${npvJSON}" | jq "del(.spec.nfs)")
npvJSON=$(echo "${npvJSON}" | jq "del(.spec.iscsi)")
npvJSON=$(echo "${npvJSON}" | jq ".spec.storageClassName = \"${NEW_STORAGE_CLASS_NAME}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.driver = \"${NEW_CSI_DRIVER_NAME}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeHandle = \"${name}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.fsType = \"${fsType}\"")
npvJSON=$(echo "${npvJSON}" | jq '.spec.persistentVolumeReclaimPolicy = "Retain"')
# secrets
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.controllerExpandSecretRef.name = \"${CE_SECRET_NAME}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.controllerExpandSecretRef.namespace = \"${CONTROLLER_NAMESPACE}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.controllerPublishSecretRef.name = \"${CP_SECRET_NAME}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.controllerPublishSecretRef.namespace = \"${CONTROLLER_NAMESPACE}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.nodePublishSecretRef.name = \"${NP_SECRET_NAME}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.nodePublishSecretRef.namespace = \"${CONTROLLER_NAMESPACE}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.nodeStageSecretRef.name = \"${NS_SECRET_NAME}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.nodeStageSecretRef.namespace = \"${CONTROLLER_NAMESPACE}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.node_attach_driver = \"${SHARE_STRATEGY}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.provisioner_driver = \"${PROVISIONER_DRIVER}\"")
npvJSON=$(echo "${npvJSON}" | jq ".spec.csi.volumeAttributes.\"storage.kubernetes.io/csiProvisionerIdentity\" = \"${PROVISIONER_IDENTITY}\"")
if [[ ${status} == "Bound" ]]; then
:
# ensure any workloads are shutdown
yes_or_no "Please type y when all workloads using the PV/PVC have been scaled to 0"
yes_or_no "Are you certain nothing is using the share?"
claimName=$(echo "${pvJSON}" | jq -crM ".spec.claimRef.name")
claimNamespace=$(echo "${pvJSON}" | jq -crM ".spec.claimRef.namespace")
echo "${claimNamespace}/${claimName}"
if [[ ! -f "tmp/${pv}-pvc.json" ]]; then
pvcJSON=$(kubectl -n "${claimNamespace}" get pvc "${claimName}" -o json)
echo "${pvcJSON}" >>"tmp/${pv}-pvc.json"
else
pvcJSON=$(cat "tmp/${pv}-pvc.json")
fi
npvcJSON="${pvcJSON}"
kubectl patch pv "${name}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
kubectl -n "${claimNamespace}" delete pvc "${claimName}" --wait=false || true
sleep 3
kubectl -n "${claimNamespace}" patch pvc "${claimName}" -p '{"metadata":{"finalizers": null }}' || true
sleep 3
# update pvc
npvcJSON=$(echo "${npvcJSON}" | jq "del(.metadata.resourceVersion)")
npvcJSON=$(echo "${npvcJSON}" | jq ".metadata.annotations.\"volume.beta.kubernetes.io/storage-provisioner\" = \"${NEW_CSI_DRIVER_NAME}\"")
npvcJSON=$(echo "${npvcJSON}" | jq ".spec.storageClassName = \"${NEW_STORAGE_CLASS_NAME}\"")
# recreate pvc
echo "${npvcJSON}" | jq .
yes_or_no "Would you like to contiue with the update to the PVC with the above details? "
echo "${npvcJSON}" | kubectl apply -f -
# get pvc .metadata.uid
uid=$(kubectl -n "${claimNamespace}" get pvc "${claimName}" -o jsonpath='{.metadata.uid}')
# set pv .spec.claimRef.uid
#npvJSON="${pvJSON}"
npvJSON=$(echo "${npvJSON}" | jq "del(.metadata.resourceVersion)")
npvJSON=$(echo "${npvJSON}" | jq ".spec.claimRef.uid = \"${uid}\"")
# wait for things to settle and all should be well
sleep 3
fi
if [[ ${status} == "Released" ]]; then
yes_or_no "PV status is Released, not updating PVC details, is this OK?"
fi
echo "${npvJSON}" | jq .
yes_or_no "Would you like to contiue with the update to the PV with the above details? " && {
:
echo "starting PV update PV ${pv}"
kubectl patch pv "${name}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
kubectl delete pv "${name}"
echo "${npvJSON}" | kubectl apply -f -
echo "successfully updated PV ${pv}"
} || {
:
echo "you decided no"
}
if [[ -z ${src} || ${src} == "null" ]]; then
read -p "Enter the src zvol/dataset (share path: ${path}): " src
fi
if [[ ${SHARE_STRATEGY} == "nfs" ]]; then
if [[ -z ${shareId} || ${shareId} == "null" ]]; then
echo "Edit the share in the FreeNAS UI and observe the id in the URL address bar"
read -p "shareId: " shareId
fi
fi
echo ""
echo ""
yes_or_no "Do you understand that you *must* execute all the commands shown after this message in the *exact* order shown? You cannot skip any of them, they all must succeed (including 200s from the curl commands)." && {
echo "OK then, moving on :)"
} || {
echo "It's best you stop here"
exit 1
}
echo ""
echo ""
echo "################## commands to run on TrueNAS cli #############################"
echo ""
echo "# set properties"
# common
if [[ -n ${PROVISIONER_INSTANCE_ID} ]]; then
echo "zfs set democratic-csi:volume_context_provisioner_instance_id=${PROVISIONER_INSTANCE_ID} ${src}"
fi
echo "zfs set democratic-csi:csi_volume_name=${pv} ${src}"
echo "zfs set democratic-csi:provision_success=true ${src}"
echo "zfs set democratic-csi:managed_resource=true ${src}"
if [[ ${SHARE_STRATEGY} == "nfs" ]]; then
# nfs
volume_context="{\"node_attach_driver\":\"nfs\",\"server\":\"${server}\",\"share\":\"${npath}\"}"
echo "zfs set democratic-csi:csi_share_volume_context='${volume_context}' ${src}"
echo "zfs set democratic-csi:freenas_nfs_share_id=${shareId} ${src}"
echo "zfs set democratic-csi:volume_context_provisioner_driver=freenas-nfs ${src}"
fi
if [[ ${SHARE_STRATEGY} == "iscsi" ]]; then
# iscsi
echo "zfs set democratic-csi:freenas_iscsi_assets_name=${ISCSI_ASSET_NAME} ${src}"
volume_context="{\"node_attach_driver\":\"iscsi\",\"portal\":\"${targetPortal}\",\"portals\":\"\",\"interface\":\"\",\"iqn\":\"${niqn}\",\"lun\":${lun}}"
echo "zfs set democratic-csi:csi_share_volume_context='${volume_context}' ${src}"
echo "zfs set democratic-csi:freenas_iscsi_target_id=${targetId} ${src}"
echo "zfs set democratic-csi:freenas_iscsi_extent_id=${extentId} ${src}"
echo "zfs set democratic-csi:freenas_iscsi_targettoextent_id=${targetToExtentId} ${src}"
echo "zfs set democratic-csi:volume_context_provisioner_driver=freenas-iscsi ${src}"
fi
echo ""
echo ""
echo "################## end commands to run on FreeNAS cli #############################"
echo ""
echo "#################### API curl command to update share #########################"
echo ""
# update shares to point to new location of vol/dataset
# rename dataset/zvol
if [[ ${SHARE_STRATEGY} == "nfs" ]]; then
# nfs
:
echo "# temporarily assign share to different path to free up dataset for rename"
echo "curl -v -u\"${FREENAS_USERNAME}:${FREENAS_PASSWORD}\" -H \"Content-Type: application/json\" -H \"Accept: application/json\" -XPUT \"${FREENAS_URI}/api/v2.0/sharing/nfs/id/${shareId}\" -d '{\"paths\":[\"${TMP_ASSET}\"]}'"
echo ""
echo "# rename asset"
echo "zfs rename -p -f ${src} ${dst}"
echo ""
echo "# re-associate the share with the dataset"
echo "curl -v -u\"${FREENAS_USERNAME}:${FREENAS_PASSWORD}\" -H \"Content-Type: application/json\" -H \"Accept: application/json\" -XPUT \"${FREENAS_URI}/api/v2.0/sharing/nfs/id/${shareId}\" -d '{\"paths\":[\"${npath}\"]}'"
fi
if [[ ${SHARE_STRATEGY} == "iscsi" ]]; then
# iscsi
:
echo "# temporarily assign extent to different asset to free up zvol for rename"
echo "curl -v -u\"${FREENAS_USERNAME}:${FREENAS_PASSWORD}\" -H \"Content-Type: application/json\" -H \"Accept: application/json\" -XPUT \"${FREENAS_URI}/api/v2.0/iscsi/extent/id/${extentId}\" -d '{\"path\":\"zvol/${TMP_ASSET}\", \"disk\":\"zvol/${TMP_ASSET}\"}'"
echo ""
echo "# rename asset"
echo "zfs rename -p -f ${src} ${dst}"
echo ""
echo "curl -v -u\"${FREENAS_USERNAME}:${FREENAS_PASSWORD}\" -H \"Content-Type: application/json\" -H \"Accept: application/json\" -XPUT \"${FREENAS_URI}/api/v2.0/iscsi/target/id/${targetId}\" -d '{\"name\":\"${ISCSI_ASSET_NAME}\"}'"
echo "curl -v -u\"${FREENAS_USERNAME}:${FREENAS_PASSWORD}\" -H \"Content-Type: application/json\" -H \"Accept: application/json\" -XPUT \"${FREENAS_URI}/api/v2.0/iscsi/extent/id/${extentId}\" -d '{\"name\":\"${ISCSI_ASSET_NAME}\", \"path\":\"zvol/${dst}\", \"disk\":\"zvol/${dst}\"}'"
fi
echo ""
echo "################## end API curl command to update share #############################"
echo "################## final cleanup ######################"
echo ""
echo "# ensure volumes are bound/etc as appropriate and restart your workloads here and ensure all is well"
echo ""
echo "# restore original reclaim policy"
echo "kubectl patch pv \"${name}\" -p '{\"spec\":{\"persistentVolumeReclaimPolicy\":\"${reclaimPolicy}\"}}'"
echo ""
echo "################## end final cleanup ######################"

View File

@ -1,82 +0,0 @@
#!/bin/bash
######## REQUIREMENTS #########
# kubectl
# yq (https://github.com/mikefarah/yq)
# a valid EDITOR env variable set
set -e
#set -x
function yes_or_no {
while true; do
read -p "$* [y/n]: " yn
case $yn in
[Yy]*) return 0 ;;
[Nn]*)
return 1
;;
esac
done
}
PV=${1}
if [[ -z ${PV} ]]; then
echo "must supply a PV name"
exit 1
fi
PV_ORIG_FILE="/tmp/${PV}-orig.yaml"
PV_TMP_FILE="/tmp/${PV}-tmp.yaml"
# save original
if [[ -f ${PV_ORIG_FILE} ]]; then
yes_or_no "It appears we already made a backup of ${PV}. Would you like to use the existing backup? (if no, a fresh backup will be created)" && {
:
} || {
rm "${PV_ORIG_FILE}"
}
fi
if [[ ! -f ${PV_ORIG_FILE} ]]; then
kubectl get pv "${PV}" -o yaml >"${PV_ORIG_FILE}"
fi
reclaimPolicy=$(yq '.spec.persistentVolumeReclaimPolicy' "${PV_ORIG_FILE}")
# copy file for editing
cp "${PV_ORIG_FILE}" "${PV_TMP_FILE}"
# pre-process before edit
yq -i 'del(.metadata.resourceVersion)' "${PV_TMP_FILE}"
# manually edit
${EDITOR} "${PV_TMP_FILE}"
# ask if looks good
yq '.' "${PV_TMP_FILE}"
yes_or_no "Would you like to delete the existing PV object and recreate with the above data?"
# set reclaim policy to Retain on the PV
kubectl patch pv "${PV}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
# delete PV from API
kubectl delete pv "${PV}" --wait=false
kubectl patch pv "${PV}" -p '{"metadata":{"finalizers": null }}' &>/dev/null || true
# re-apply newly updated file
kubectl apply -f "${PV_TMP_FILE}"
# restore original reclaim value
kubectl patch pv "${PV}" -p "{\"spec\":{\"persistentVolumeReclaimPolicy\":${reclaimPolicy}}}"
# spit out any zfs properties updates
yes_or_no "Would you like to delete the PV backup file?" && {
rm "${PV_ORIG_FILE}"
} || {
:
}
rm "${PV_TMP_FILE}"
echo "Edit complete!"

View File

@ -1,108 +0,0 @@
#!/bin/bash
# simple script to 'start' nvmet on TrueNAS SCALE
#
# to reinstall nvmetcli simply rm /usr/sbin/nvmetcli
# debug
#set -x
# exit non-zero
set -e
SCRIPTDIR="$(
cd -- "$(dirname "$0")" >/dev/null 2>&1
pwd -P
)"
cd "${SCRIPTDIR}"
: "${NVMETCONFIG:="${SCRIPTDIR}/nvmet-config.json"}"
: "${NVMETVENV:="${SCRIPTDIR}/nvmet-venv"}"
export PATH=${HOME}/.local/bin:${PATH}
main() {
kernel_modules
nvmetcli ls &>/dev/null || {
setup_venv
install_nvmetcli
}
nvmetcli_restore
}
kernel_modules() {
modules=()
modules+=("nvmet")
modules+=("nvmet-fc")
modules+=("nvmet-rdma")
modules+=("nvmet-tcp")
for module in "${modules[@]}"; do
modprobe "${module}"
done
}
setup_venv() {
rm -rf ${NVMETVENV}
python -m venv ${NVMETVENV} --without-pip --system-site-packages
activate_venv
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python get-pip.py
rm get-pip.py
deactivate_venv
}
activate_venv() {
. ${NVMETVENV}/bin/activate
}
deactivate_venv() {
deactivate
}
install_nvmetcli() {
if [[ ! -d nvmetcli ]]; then
git clone git://git.infradead.org/users/hch/nvmetcli.git
fi
cd nvmetcli
activate_venv
# install to root home dir
python3 setup.py install --install-scripts=${HOME}/.local/bin
# pin the configshell_fb dependency to a known-working version
pip install configshell_fb==1.1.30
# remove source
cd "${SCRIPTDIR}"
rm -rf nvmetcli
deactivate_venv
}
nvmetcli_restore() {
activate_venv
cd "${SCRIPTDIR}"
nvmetcli restore "${NVMETCONFIG}"
deactivate_venv
touch /var/run/nvmet-config-loaded
chmod +r /var/run/nvmet-config-loaded
}
main
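
Assuming the script is saved on the SCALE host as nvmet-start.sh next to its nvmet-config.json (both names are assumptions), a minimal smoke test is:

# hypothetical invocation; the saved nvmet-config.json must sit in the same directory
./nvmet-start.sh
# the marker file only appears after a successful nvmetcli restore
test -f /var/run/nvmet-config-loaded && nvmetcli ls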

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,6 +0,0 @@
#!/bin/bash
# example argument: v1.6.0
VERSION=${1}
curl -v -o "csi-${VERSION}.proto" https://raw.githubusercontent.com/container-storage-interface/spec/${VERSION}/csi.proto
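
Hypothetical usage, assuming the script is saved as get-csi-proto.sh:

./get-csi-proto.sh v1.6.0
ls csi-v1.6.0.proto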

View File

@ -1,111 +0,0 @@
syntax = "proto3";
package v1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1";
service Disk {
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
// disk devices enumerated by the host.
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
// PartitionDisk initializes and partitions a disk device with the GPT partition style
// (if the disk has not been partitioned already) and returns the resulting volume device ID.
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
// Rescan refreshes the host's storage cache.
rpc Rescan(RescanRequest) returns (RescanResponse) {}
// ListDiskIDs returns a map of DiskID objects where the key is the disk number.
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
// GetDiskStats returns the stats of a disk (currently it returns the disk size).
rpc GetDiskStats(GetDiskStatsRequest) returns (GetDiskStatsResponse) {}
// SetDiskState sets the offline/online state of a disk.
rpc SetDiskState(SetDiskStateRequest) returns (SetDiskStateResponse) {}
// GetDiskState gets the offline/online state of a disk.
rpc GetDiskState(GetDiskStateRequest) returns (GetDiskStateResponse) {}
}
message ListDiskLocationsRequest {
// Intentionally empty.
}
message DiskLocation {
string Adapter = 1;
string Bus = 2;
string Target = 3;
string LUNID = 4;
}
message ListDiskLocationsResponse {
// Map of disk number and <adapter, bus, target, lun ID> associated with each disk device.
map <uint32, DiskLocation> disk_locations = 1;
}
message PartitionDiskRequest {
// Disk device number of the disk to partition.
uint32 disk_number = 1;
}
message PartitionDiskResponse {
// Intentionally empty.
}
message RescanRequest {
// Intentionally empty.
}
message RescanResponse {
// Intentionally empty.
}
message ListDiskIDsRequest {
// Intentionally empty.
}
message DiskIDs {
// The disk page83 id.
string page83 = 1;
// The disk serial number.
string serial_number = 2;
}
message ListDiskIDsResponse {
// Map of disk numbers and disk identifiers associated with each disk device.
map <uint32, DiskIDs> diskIDs = 1; // the case is intentional for protoc to generate the field as DiskIDs
}
message GetDiskStatsRequest {
// Disk device number of the disk to get the stats from.
uint32 disk_number = 1;
}
message GetDiskStatsResponse {
// Total size of the volume.
int64 total_bytes = 1;
}
message SetDiskStateRequest {
// Disk device number of the disk.
uint32 disk_number = 1;
// Online state to set for the disk. true for online, false for offline.
bool is_online = 2;
}
message SetDiskStateResponse {
// Intentionally empty.
}
message GetDiskStateRequest {
// Disk device number of the disk.
uint32 disk_number = 1;
}
message GetDiskStateResponse {
// Online state of the disk. true for online, false for offline.
bool is_online = 1;
}

View File

@ -1,62 +0,0 @@
syntax = "proto3";
package v1alpha1;
service Disk {
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
// disk devices enumerated by the host
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
// PartitionDisk initializes and partitions a disk device (if the disk has not
// been partitioned already) and returns the resulting volume device ID
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
// Rescan refreshes the host's storage cache
rpc Rescan(RescanRequest) returns (RescanResponse) {}
// GetDiskNumberByName returns disk number based on the passing disk name information
rpc GetDiskNumberByName(GetDiskNumberByNameRequest) returns (GetDiskNumberByNameResponse) {}
}
message ListDiskLocationsRequest {
// Intentionally empty
}
message DiskLocation {
string Adapter = 1;
string Bus = 2;
string Target = 3;
string LUNID = 4;
}
message ListDiskLocationsResponse {
// Map of disk device IDs and <adapter, bus, target, lun ID> associated with each disk device
map <string, DiskLocation> disk_locations = 1;
}
message PartitionDiskRequest {
// Disk device ID of the disk to partition
string diskID = 1;
}
message PartitionDiskResponse {
// Intentionally empty
}
message RescanRequest {
// Intentionally empty
}
message RescanResponse {
// Intentionally empty
}
message GetDiskNumberByNameRequest {
// Disk ID
string disk_name = 1;
}
message GetDiskNumberByNameResponse {
// Disk number
string disk_number = 1;
}

View File

@ -1,81 +0,0 @@
syntax = "proto3";
package v1beta1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1beta1";
service Disk {
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
// disk devices enumerated by the host
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
// PartitionDisk initializes and partitions a disk device (if the disk has not
// been partitioned already) and returns the resulting volume device ID
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
// Rescan refreshes the host's storage cache
rpc Rescan(RescanRequest) returns (RescanResponse) {}
// ListDiskIDs returns a map of DiskID objects where the key is the disk number
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
// DiskStats returns the stats for the disk
rpc DiskStats(DiskStatsRequest) returns (DiskStatsResponse) {}
}
message ListDiskLocationsRequest {
// Intentionally empty
}
message DiskLocation {
string Adapter = 1;
string Bus = 2;
string Target = 3;
string LUNID = 4;
}
message ListDiskLocationsResponse {
// Map of disk device IDs and <adapter, bus, target, lun ID> associated with each disk device
map <string, DiskLocation> disk_locations = 1;
}
message PartitionDiskRequest {
// Disk device ID of the disk to partition
string diskID = 1;
}
message PartitionDiskResponse {
// Intentionally empty
}
message RescanRequest {
// Intentionally empty
}
message RescanResponse {
// Intentionally empty
}
message ListDiskIDsRequest {
// Intentionally empty
}
message DiskIDs {
// Map of Disk ID types and Disk ID values
map <string, string> identifiers = 1;
}
message ListDiskIDsResponse {
// Map of disk device numbers and IDs <page83> associated with each disk device
map <string, DiskIDs> diskIDs = 1;
}
message DiskStatsRequest {
// Disk device ID of the disk to get the size from
string diskID = 1;
}
message DiskStatsResponse {
//Total size of the volume
int64 diskSize = 1;
}

View File

@ -1,109 +0,0 @@
syntax = "proto3";
package v1beta2;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1beta2";
service Disk {
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
// disk devices enumerated by the host
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
// PartitionDisk initializes and partitions a disk device (if the disk has not
// been partitioned already) and returns the resulting volume device ID
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
// Rescan refreshes the host's storage cache
rpc Rescan(RescanRequest) returns (RescanResponse) {}
// ListDiskIDs returns a map of DiskID objects where the key is the disk number
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
// DiskStats returns the stats for the disk
rpc DiskStats(DiskStatsRequest) returns (DiskStatsResponse) {}
// SetAttachState sets the offline/online state of a disk
rpc SetAttachState(SetAttachStateRequest) returns (SetAttachStateResponse) {}
// GetAttachState gets the offline/online state of a disk
rpc GetAttachState(GetAttachStateRequest) returns (GetAttachStateResponse) {}
}
message ListDiskLocationsRequest {
// Intentionally empty
}
message DiskLocation {
string Adapter = 1;
string Bus = 2;
string Target = 3;
string LUNID = 4;
}
message ListDiskLocationsResponse {
// Map of disk device IDs and <adapter, bus, target, lun ID> associated with each disk device
map <string, DiskLocation> disk_locations = 1;
}
message PartitionDiskRequest {
// Disk device ID of the disk to partition
string diskID = 1;
}
message PartitionDiskResponse {
// Intentionally empty
}
message RescanRequest {
// Intentionally empty
}
message RescanResponse {
// Intentionally empty
}
message ListDiskIDsRequest {
// Intentionally empty
}
message DiskIDs {
// Map of Disk ID types and Disk ID values
map <string, string> identifiers = 1;
}
message ListDiskIDsResponse {
// Map of disk device numbers and IDs <page83> associated with each disk device
map <string, DiskIDs> diskIDs = 1;
}
message DiskStatsRequest {
// Disk device ID of the disk to get the size from
string diskID = 1;
}
message DiskStatsResponse {
//Total size of the volume
int64 diskSize = 1;
}
message SetAttachStateRequest {
// Disk device ID (number) of the disk which state will change
string diskID = 1;
// Online state to set for the disk. true for online, false for offline
bool isOnline = 2;
}
message SetAttachStateResponse {
}
message GetAttachStateRequest {
// Disk device ID (number) of the disk
string diskID = 1;
}
message GetAttachStateResponse {
// Online state of the disk. true for online, false for offline
bool isOnline = 1;
}

View File

@ -1,111 +0,0 @@
syntax = "proto3";
package v1beta3;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1beta3";
service Disk {
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
// disk devices enumerated by the host.
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
// PartitionDisk initializes and partitions a disk device with the GPT partition style
// (if the disk has not been partitioned already) and returns the resulting volume device ID.
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
// Rescan refreshes the host's storage cache.
rpc Rescan(RescanRequest) returns (RescanResponse) {}
// ListDiskIDs returns a map of DiskID objects where the key is the disk number.
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
// GetDiskStats returns the stats of a disk (currently it returns the disk size).
rpc GetDiskStats(GetDiskStatsRequest) returns (GetDiskStatsResponse) {}
// SetDiskState sets the offline/online state of a disk.
rpc SetDiskState(SetDiskStateRequest) returns (SetDiskStateResponse) {}
// GetDiskState gets the offline/online state of a disk.
rpc GetDiskState(GetDiskStateRequest) returns (GetDiskStateResponse) {}
}
message ListDiskLocationsRequest {
// Intentionally empty.
}
message DiskLocation {
string Adapter = 1;
string Bus = 2;
string Target = 3;
string LUNID = 4;
}
message ListDiskLocationsResponse {
// Map of disk number and <adapter, bus, target, lun ID> associated with each disk device.
map <uint32, DiskLocation> disk_locations = 1;
}
message PartitionDiskRequest {
// Disk device number of the disk to partition.
uint32 disk_number = 1;
}
message PartitionDiskResponse {
// Intentionally empty.
}
message RescanRequest {
// Intentionally empty.
}
message RescanResponse {
// Intentionally empty.
}
message ListDiskIDsRequest {
// Intentionally empty.
}
message DiskIDs {
// The disk page83 id.
string page83 = 1;
// The disk serial number.
string serial_number = 2;
}
message ListDiskIDsResponse {
// Map of disk numbers and disk identifiers associated with each disk device.
map <uint32, DiskIDs> diskIDs = 1; // the case is intentional for protoc to generate the field as DiskIDs
}
message GetDiskStatsRequest {
// Disk device number of the disk to get the stats from.
uint32 disk_number = 1;
}
message GetDiskStatsResponse {
// Total size of the volume.
int64 total_bytes = 1;
}
message SetDiskStateRequest {
// Disk device number of the disk.
uint32 disk_number = 1;
// Online state to set for the disk. true for online, false for offline.
bool is_online = 2;
}
message SetDiskStateResponse {
// Intentionally empty.
}
message GetDiskStateRequest {
// Disk device number of the disk.
uint32 disk_number = 1;
}
message GetDiskStateResponse {
// Online state of the disk. true for online, false for offline.
bool is_online = 1;
}

View File

@ -1,17 +0,0 @@
syntax = "proto3";
package api;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api";
// CmdletError details errors yielded by cmdlet calls.
message CmdletError {
// Name of the cmdlet that errored out.
string cmdlet_name = 1;
// Error code that got returned.
uint32 code = 2;
// Human-readable error message - can be empty.
string message = 3;
}

View File

@ -1,136 +0,0 @@
syntax = "proto3";
package v1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v1";
service Filesystem {
// PathExists checks if the requested path exists in the host filesystem.
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
// Mkdir creates a directory at the requested path in the host filesystem.
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
// Rmdir removes the directory at the requested path in the host filesystem.
// This may be used for unlinking a symlink created through CreateSymlink.
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
// CreateSymlink creates a symbolic link called target_path that points to source_path
// in the host filesystem (target_path is the name of the symbolic link created,
// source_path is the existing path).
rpc CreateSymlink(CreateSymlinkRequest) returns (CreateSymlinkResponse) {}
// IsSymlink checks if a given path is a symlink.
rpc IsSymlink(IsSymlinkRequest) returns (IsSymlinkResponse) {}
}
message PathExistsRequest {
// The path whose existence we want to check in the host's filesystem
string path = 1;
}
message PathExistsResponse {
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
bool exists = 1;
}
message MkdirRequest {
// The path to create in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
// Non-existent parent directories in the path will be automatically created.
// Directories will be created with Read and Write privileges of the Windows
// User account under which csi-proxy is started (typically LocalSystem).
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// The path parameter cannot already exist in the host's filesystem.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Maximum path length will be capped to 260 characters.
string path = 1;
}
message MkdirResponse {
// Intentionally empty.
}
message RmdirRequest {
// The path to remove in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Path cannot be a file of type symlink.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Force remove all contents under path (if any).
bool force = 2;
}
message RmdirResponse {
// Intentionally empty.
}
message CreateSymlinkRequest {
// The path of the existing directory to be linked.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-csi-plugins-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// source_path cannot already exist in the host filesystem.
// Maximum path length will be capped to 260 characters.
string source_path = 1;
// Target path is the location of the new directory entry to be created in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-pod-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// target_path needs to exist as a directory in the host that is empty.
// target_path cannot be a symbolic link.
// Maximum path length will be capped to 260 characters.
string target_path = 2;
}
message CreateSymlinkResponse {
// Intentionally empty.
}
message IsSymlinkRequest {
// The path whose existence as a symlink we want to check in the host's filesystem.
string path = 1;
}
message IsSymlinkResponse {
// Indicates whether the path in IsSymlinkRequest is a symlink.
bool is_symlink = 1;
}

View File

@ -1,168 +0,0 @@
syntax = "proto3";
package v1alpha1;
service Filesystem {
// PathExists checks if the requested path exists in the host's filesystem
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
// Mkdir creates a directory at the requested path in the host's filesystem
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
// Rmdir removes the directory at the requested path in the host's filesystem.
// This may be used for unlinking a symlink created through LinkPath
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
// LinkPath creates a local directory symbolic link between a source path
// and target path in the host's filesystem
rpc LinkPath(LinkPathRequest) returns (LinkPathResponse) {}
// IsMountPoint checks if a given path is a mount point or not
rpc IsMountPoint(IsMountPointRequest) returns (IsMountPointResponse) {}
}
// Context of the paths used for path prefix validation
enum PathContext {
// Indicates the kubelet-csi-plugins-path parameter of csi-proxy be used as
// the path context. This may be used while handling NodeStageVolume where
// a volume may need to be mounted at a plugin-specific path like:
// kubelet\plugins\kubernetes.io\csi\pv\<pv-name>\globalmount
PLUGIN = 0;
// Indicates the kubelet-pod-path parameter of csi-proxy be used as the path
// context. This may be used while handling NodePublishVolume where a staged
// volume may need to be symlinked to a pod-specific path like:
// kubelet\pods\<pod-uuid>\volumes\kubernetes.io~csi\<pvc-name>\mount
POD = 1;
}
message PathExistsRequest {
// The path whose existence we want to check in the host's filesystem
string path = 1;
// Context of the path parameter.
// This is used to validate prefix for absolute paths passed
PathContext context = 2;
}
message PathExistsResponse {
// Error message if any. Empty string indicates success
string error = 1;
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
bool exists = 2;
}
message MkdirRequest {
// The path to create in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
// Non-existent parent directories in the path will be automatically created.
// Directories will be created with Read and Write privileges of the Windows
// User account under which csi-proxy is started (typically LocalSystem).
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// The path parameter cannot already exist in the host's filesystem.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Context of the path parameter.
// This is used to validate prefix for absolute paths passed
PathContext context = 2;
}
message MkdirResponse {
// Error message if any. Empty string indicates success
string error = 1;
}
message RmdirRequest {
// The path to remove in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Path cannot be a file of type symlink.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Context of the path parameter.
// This is used to validate prefix for absolute paths passed
PathContext context = 2;
// Force remove all contents under path (if any).
bool force = 3;
}
message RmdirResponse {
// Error message if any. Empty string indicates success
string error = 1;
}
message LinkPathRequest {
// The path where the symlink is created in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-csi-plugins-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// source_path cannot already exist in the host filesystem.
// Maximum path length will be capped to 260 characters.
string source_path = 1;
// Target path in the host's filesystem used for the symlink creation.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-pod-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// target_path needs to exist as a directory in the host that is empty.
// target_path cannot be a symbolic link.
// Maximum path length will be capped to 260 characters.
string target_path = 2;
}
message LinkPathResponse {
// Error message if any. Empty string indicates success
string error = 1;
}
message IsMountPointRequest {
// The path whose existence we want to check in the host's filesystem
string path = 1;
}
message IsMountPointResponse {
// Error message if any. Empty string indicates success
string error = 1;
// Indicates whether the path in IsMountPointRequest is a mount point in the host's filesystem
bool is_mount_point = 2;
}

View File

@ -1,168 +0,0 @@
syntax = "proto3";
package v1beta1;
service Filesystem {
// PathExists checks if the requested path exists in the host's filesystem
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
// Mkdir creates a directory at the requested path in the host's filesystem
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
// Rmdir removes the directory at the requested path in the host's filesystem.
// This may be used for unlinking a symlink created through LinkPath
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
// LinkPath creates a local directory symbolic link between a source path
// and target path in the host's filesystem
rpc LinkPath(LinkPathRequest) returns (LinkPathResponse) {}
// IsMountPoint checks if a given path is a mount point or not
rpc IsMountPoint(IsMountPointRequest) returns (IsMountPointResponse) {}
}
// Context of the paths used for path prefix validation
enum PathContext {
// Indicates the kubelet-csi-plugins-path parameter of csi-proxy be used as
// the path context. This may be used while handling NodeStageVolume where
// a volume may need to be mounted at a plugin-specific path like:
// kubelet\plugins\kubernetes.io\csi\pv\<pv-name>\globalmount
PLUGIN = 0;
// Indicates the kubelet-pod-path parameter of csi-proxy be used as the path
// context. This may be used while handling NodePublishVolume where a staged
// volume may need to be symlinked to a pod-specific path like:
// kubelet\pods\<pod-uuid>\volumes\kubernetes.io~csi\<pvc-name>\mount
POD = 1;
}
message PathExistsRequest {
// The path whose existence we want to check in the host's filesystem
string path = 1;
// Context of the path parameter.
// This is used to validate prefix for absolute paths passed
PathContext context = 2;
}
message PathExistsResponse {
// Error message if any. Empty string indicates success
string error = 1;
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
bool exists = 2;
}
message MkdirRequest {
// The path to create in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
// Non-existent parent directories in the path will be automatically created.
// Directories will be created with Read and Write privileges of the Windows
// User account under which csi-proxy is started (typically LocalSystem).
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// The path parameter cannot already exist in the host's filesystem.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Context of the path parameter.
// This is used to validate prefix for absolute paths passed
PathContext context = 2;
}
message MkdirResponse {
// Error message if any. Empty string indicates success
string error = 1;
}
message RmdirRequest {
// The path to remove in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Path cannot be a file of type symlink.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Context of the path parameter.
// This is used to validate prefix for absolute paths passed
PathContext context = 2;
// Force remove all contents under path (if any).
bool force = 3;
}
message RmdirResponse {
// Error message if any. Empty string indicates success
string error = 1;
}
message LinkPathRequest {
// The path where the symlink is created in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-csi-plugins-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// source_path cannot already exist in the host filesystem.
// Maximum path length will be capped to 260 characters.
string source_path = 1;
// Target path in the host's filesystem used for the symlink creation.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-pod-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// target_path needs to exist as a directory in the host that is empty.
// target_path cannot be a symbolic link.
// Maximum path length will be capped to 260 characters.
string target_path = 2;
}
message LinkPathResponse {
// Error message if any. Empty string indicates success
string error = 1;
}
message IsMountPointRequest {
// The path to check in the host's filesystem
string path = 1;
}
message IsMountPointResponse {
// Error message if any. Empty string indicates success
string error = 1;
// Indicates whether the path in IsMountPointRequest is a mount point in the host's filesystem
bool is_mount_point = 2;
}

View File

@ -1,136 +0,0 @@
syntax = "proto3";
package v1beta2;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v1beta2";
service Filesystem {
// PathExists checks if the requested path exists in the host filesystem.
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
// Mkdir creates a directory at the requested path in the host filesystem.
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
// Rmdir removes the directory at the requested path in the host filesystem.
// This may be used for unlinking a symlink created through CreateSymlink.
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
// CreateSymlink creates a symbolic link called target_path that points to source_path
// in the host filesystem (target_path is the name of the symbolic link created,
// source_path is the existing path).
rpc CreateSymlink(CreateSymlinkRequest) returns (CreateSymlinkResponse) {}
// IsSymlink checks if a given path is a symlink.
rpc IsSymlink(IsSymlinkRequest) returns (IsSymlinkResponse) {}
}
message PathExistsRequest {
// The path whose existence we want to check in the host's filesystem
string path = 1;
}
message PathExistsResponse {
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
bool exists = 1;
}
message MkdirRequest {
// The path to create in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
// Non-existent parent directories in the path will be automatically created.
// Directories will be created with Read and Write privileges of the Windows
// User account under which csi-proxy is started (typically LocalSystem).
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// The path parameter cannot already exist in the host's filesystem.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Maximum path length will be capped to 260 characters.
string path = 1;
}
message MkdirResponse {
// Intentionally empty.
}
message RmdirRequest {
// The path to remove in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Path cannot be a file of type symlink.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Force remove all contents under path (if any).
bool force = 2;
}
message RmdirResponse {
// Intentionally empty.
}
message CreateSymlinkRequest {
// The path of the existing directory to be linked.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-csi-plugins-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// source_path cannot already exist in the host filesystem.
// Maximum path length will be capped to 260 characters.
string source_path = 1;
// Target path is the location of the new directory entry to be created in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-pod-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// target_path needs to exist as a directory in the host that is empty.
// target_path cannot be a symbolic link.
// Maximum path length will be capped to 260 characters.
string target_path = 2;
}
message CreateSymlinkResponse {
// Intentionally empty.
}
message IsSymlinkRequest {
// The path whose existence as a symlink we want to check in the host's filesystem.
string path = 1;
}
message IsSymlinkResponse {
// Indicates whether the path in IsSymlinkRequest is a symlink.
bool is_symlink = 1;
}

View File

@ -1,163 +0,0 @@
syntax = "proto3";
package v2alpha1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v2alpha1";
service Filesystem {
// PathExists checks if the requested path exists in the host filesystem.
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
// Mkdir creates a directory at the requested path in the host filesystem.
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
// Rmdir removes the directory at the requested path in the host filesystem.
// This may be used for unlinking a symlink created through CreateSymlink.
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
// RmdirContents removes the contents of a directory in the host filesystem.
// Unlike Rmdir it won't delete the requested path, it'll only delete its contents.
rpc RmdirContents(RmdirContentsRequest) returns (RmdirContentsResponse) {}
// CreateSymlink creates a symbolic link called target_path that points to source_path
// in the host filesystem (target_path is the name of the symbolic link created,
// source_path is the existing path).
rpc CreateSymlink(CreateSymlinkRequest) returns (CreateSymlinkResponse) {}
// IsSymlink checks if a given path is a symlink.
rpc IsSymlink(IsSymlinkRequest) returns (IsSymlinkResponse) {}
}
message PathExistsRequest {
// The path whose existence we want to check in the host's filesystem
string path = 1;
}
message PathExistsResponse {
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
bool exists = 1;
}
message MkdirRequest {
// The path to create in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
// Non-existent parent directories in the path will be automatically created.
// Directories will be created with Read and Write privileges of the Windows
// User account under which csi-proxy is started (typically LocalSystem).
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// The path parameter cannot already exist in the host's filesystem.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Maximum path length will be capped to 260 characters.
string path = 1;
}
message MkdirResponse {
// Intentionally empty.
}
message RmdirRequest {
// The path to remove in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Path cannot be a file of type symlink.
// Maximum path length will be capped to 260 characters.
string path = 1;
// Force remove all contents under path (if any).
bool force = 2;
}
message RmdirResponse {
// Intentionally empty.
}
message RmdirContentsRequest {
// The path whose contents will be removed in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// Depending on the context parameter of this function, the path prefix needs
// to match the paths specified either as kubelet-csi-plugins-path
// or as kubelet-pod-path parameters of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// Path cannot be a file of type symlink.
// Maximum path length will be capped to 260 characters.
string path = 1;
}
message RmdirContentsResponse {
// Intentionally empty.
}
message CreateSymlinkRequest {
// The path of the existing directory to be linked.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-csi-plugins-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// source_path cannot already exist in the host filesystem.
// Maximum path length will be capped to 260 characters.
string source_path = 1;
// Target path is the location of the new directory entry to be created in the host's filesystem.
// All special characters allowed by Windows in path names will be allowed
// except for restrictions noted below. For details, please check:
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
//
// Restrictions:
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
// The path prefix needs to match the paths specified as
// kubelet-pod-path parameter of csi-proxy.
// UNC paths of the form "\\server\share\path\file" are not allowed.
// All directory separators need to be backslash character: "\".
// Characters: .. / : | ? * in the path are not allowed.
// target_path needs to exist as a directory in the host that is empty.
// target_path cannot be a symbolic link.
// Maximum path length will be capped to 260 characters.
string target_path = 2;
}
message CreateSymlinkResponse {
// Intentionally empty.
}
message IsSymlinkRequest {
// The path whose existence as a symlink we want to check in the host's filesystem.
string path = 1;
}
message IsSymlinkResponse {
// Indicates whether the path in IsSymlinkRequest is a symlink.
bool is_symlink = 1;
}
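As a rough illustration of how a Windows node plugin consumes this Filesystem API, here is a minimal Go sketch. It assumes stubs generated from this proto with protoc-gen-go/protoc-gen-go-grpc (hence NewFilesystemClient), that csi-proxy serves the group on the named pipe \\.\pipe\csi-proxy-filesystem-v2alpha1, and purely illustrative kubelet paths; none of these values come from this repository.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
	pb "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v2alpha1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPipe opens a gRPC connection over a Windows named pipe.
func dialPipe(pipe string) (*grpc.ClientConn, error) {
	return grpc.Dial(pipe,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return winio.DialPipeContext(ctx, addr)
		}))
}

func main() {
	conn, err := dialPipe(`\\.\pipe\csi-proxy-filesystem-v2alpha1`) // assumed pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	fs := pb.NewFilesystemClient(conn)
	ctx := context.Background()

	// Illustrative staging (globalmount) and pod publish paths.
	staging := `C:\var\lib\kubelet\plugins\kubernetes.io\csi\pv\pv-1\globalmount`
	target := `C:\var\lib\kubelet\pods\pod-1\volumes\kubernetes.io~csi\pvc-1\mount`

	// NodeStageVolume: create the staging directory.
	if _, err := fs.Mkdir(ctx, &pb.MkdirRequest{Path: staging}); err != nil {
		log.Fatal(err)
	}
	// NodePublishVolume: expose the staged directory at the pod path via a symlink.
	if _, err := fs.CreateSymlink(ctx, &pb.CreateSymlinkRequest{
		SourcePath: staging,
		TargetPath: target,
	}); err != nil {
		log.Fatal(err)
	}
	// Later, NodeUnpublishVolume would check for the link and remove it again.
	isLink, err := fs.IsSymlink(ctx, &pb.IsSymlinkRequest{Path: target})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("published path is a symlink:", isLink.IsSymlink)
}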

View File

@ -1,153 +0,0 @@
syntax = "proto3";
package v1alpha1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/iscsi/v1alpha1";
service Iscsi {
// AddTargetPortal registers an iSCSI target network address for later
// discovery.
// AddTargetPortal currently does not support selecting different NICs or
// a different iSCSI initiator (e.g. a hardware initiator). This means that
// Windows will select the initiator NIC and instance on its own.
rpc AddTargetPortal(AddTargetPortalRequest)
returns (AddTargetPortalResponse) {}
// DiscoverTargetPortal initiates discovery on an iSCSI target network address
// and returns discovered IQNs.
rpc DiscoverTargetPortal(DiscoverTargetPortalRequest)
returns (DiscoverTargetPortalResponse) {}
// RemoveTargetPortal removes an iSCSI target network address registration.
rpc RemoveTargetPortal(RemoveTargetPortalRequest)
returns (RemoveTargetPortalResponse) {}
// ListTargetPortals lists all currently registered iSCSI target network
// addresses.
rpc ListTargetPortals(ListTargetPortalsRequest)
returns (ListTargetPortalsResponse) {}
// ConnectTarget connects to an iSCSI Target
rpc ConnectTarget(ConnectTargetRequest) returns (ConnectTargetResponse) {}
// DisconnectTarget disconnects from an iSCSI Target
rpc DisconnectTarget(DisconnectTargetRequest)
returns (DisconnectTargetResponse) {}
// GetTargetDisks returns the disk addresses that correspond to an iSCSI
// target
rpc GetTargetDisks(GetTargetDisksRequest) returns (GetTargetDisksResponse) {}
}
// TargetPortal is an address and port pair for a specific iSCSI storage
// target.
message TargetPortal {
// iSCSI Target (server) address
string target_address = 1;
// iSCSI Target port (default iSCSI port is 3260)
uint32 target_port = 2;
}
message AddTargetPortalRequest {
// iSCSI Target Portal to register in the initiator
TargetPortal target_portal = 1;
}
message AddTargetPortalResponse {
// Intentionally empty
}
message DiscoverTargetPortalRequest {
// iSCSI Target Portal on which to initiate discovery
TargetPortal target_portal = 1;
}
message DiscoverTargetPortalResponse {
// List of discovered IQN addresses
// follows IQN format: iqn.yyyy-mm.naming-authority:unique-name
repeated string iqns = 1;
}
message RemoveTargetPortalRequest {
// iSCSI Target Portal
TargetPortal target_portal = 1;
}
message RemoveTargetPortalResponse {
// Intentionally empty
}
message ListTargetPortalsRequest {
// Intentionally empty
}
message ListTargetPortalsResponse {
// A list of Target Portals currently registered in the initiator
repeated TargetPortal target_portals = 1;
}
enum AuthenticationType {
// No authentication is used
NONE = 0;
// One way CHAP authentication. The target authenticates the initiator.
ONE_WAY_CHAP = 1;
// Mutual CHAP authentication. The target and initiator authenticate each
// other.
MUTUAL_CHAP = 2;
}
message ConnectTargetRequest {
// Target portal to which the initiator will connect
TargetPortal target_portal = 1;
// IQN of the iSCSI Target
string iqn = 2;
// Connection authentication type, None by default
//
// One Way Chap uses the chap_username and chap_secret
// fields mentioned below to authenticate the initiator.
//
// Mutual Chap uses both the user/secret mentioned below
// and the Initiator Chap Secret to authenticate the target and initiator.
AuthenticationType auth_type = 3;
// CHAP Username used to authenticate the initiator
string chap_username = 4;
// CHAP password used to authenticate the initiator
string chap_secret = 5;
}
message ConnectTargetResponse {
// Intentionally empty
}
message GetTargetDisksRequest {
// Target portal whose disks will be queried
TargetPortal target_portal = 1;
// IQN of the iSCSI Target
string iqn = 2;
}
message GetTargetDisksResponse {
// List composed of disk ids (numbers) that are associated with the
// iSCSI target
repeated string diskIDs = 1;
}
message DisconnectTargetRequest {
// Target portal from which initiator will disconnect
TargetPortal target_portal = 1;
// IQN of the iSCSI Target
string iqn = 2;
}
message DisconnectTargetResponse {
// Intentionally empty
}

View File

@ -1,175 +0,0 @@
syntax = "proto3";
package v1alpha2;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/iscsi/v1alpha2";
service Iscsi {
// AddTargetPortal registers an iSCSI target network address for later
// discovery.
// AddTargetPortal currently does not support selecting different NICs or
// a different iSCSI initiator (e.g. a hardware initiator). This means that
// Windows will select the initiator NIC and instance on its own.
rpc AddTargetPortal(AddTargetPortalRequest)
returns (AddTargetPortalResponse) {}
// DiscoverTargetPortal initiates discovery on an iSCSI target network address
// and returns discovered IQNs.
rpc DiscoverTargetPortal(DiscoverTargetPortalRequest)
returns (DiscoverTargetPortalResponse) {}
// RemoveTargetPortal removes an iSCSI target network address registration.
rpc RemoveTargetPortal(RemoveTargetPortalRequest)
returns (RemoveTargetPortalResponse) {}
// ListTargetPortals lists all currently registered iSCSI target network
// addresses.
rpc ListTargetPortals(ListTargetPortalsRequest)
returns (ListTargetPortalsResponse) {}
// ConnectTarget connects to an iSCSI Target
rpc ConnectTarget(ConnectTargetRequest) returns (ConnectTargetResponse) {}
// DisconnectTarget disconnects from an iSCSI Target
rpc DisconnectTarget(DisconnectTargetRequest)
returns (DisconnectTargetResponse) {}
// GetTargetDisks returns the disk addresses that correspond to an iSCSI
// target
rpc GetTargetDisks(GetTargetDisksRequest) returns (GetTargetDisksResponse) {}
// SetMutualChapSecret sets the default CHAP secret that all initiators on
// this machine (node) use to authenticate the target on mutual CHAP
// authentication.
// NOTE: This method affects global node state and should only be used
// with consideration to other CSI drivers that run concurrently.
rpc SetMutualChapSecret(SetMutualChapSecretRequest)
returns (SetMutualChapSecretResponse) {}
}
// TargetPortal is an address and port pair for a specific iSCSI storage
// target.
message TargetPortal {
// iSCSI Target (server) address
string target_address = 1;
// iSCSI Target port (default iSCSI port is 3260)
uint32 target_port = 2;
}
message AddTargetPortalRequest {
// iSCSI Target Portal to register in the initiator
TargetPortal target_portal = 1;
}
message AddTargetPortalResponse {
// Intentionally empty
}
message DiscoverTargetPortalRequest {
// iSCSI Target Portal on which to initiate discovery
TargetPortal target_portal = 1;
}
message DiscoverTargetPortalResponse {
// List of discovered IQN addresses
// follows IQN format: iqn.yyyy-mm.naming-authority:unique-name
repeated string iqns = 1;
}
message RemoveTargetPortalRequest {
// iSCSI Target Portal
TargetPortal target_portal = 1;
}
message RemoveTargetPortalResponse {
// Intentionally empty
}
message ListTargetPortalsRequest {
// Intentionally empty
}
message ListTargetPortalsResponse {
// A list of Target Portals currently registered in the initiator
repeated TargetPortal target_portals = 1;
}
// iSCSI logon authentication type
enum AuthenticationType {
// No authentication is used
NONE = 0;
// One way CHAP authentication. The target authenticates the initiator.
ONE_WAY_CHAP = 1;
// Mutual CHAP authentication. The target and initiator authenticate each
// other.
MUTUAL_CHAP = 2;
}
message ConnectTargetRequest {
// Target portal to which the initiator will connect
TargetPortal target_portal = 1;
// IQN of the iSCSI Target
string iqn = 2;
// Connection authentication type, None by default
//
// One Way Chap uses the chap_username and chap_secret
// fields mentioned below to authenticate the initiator.
//
// Mutual Chap uses both the user/secret mentioned below
// and the Initiator Chap Secret (See `SetMutualChapSecret`)
// to authenticate the target and initiator.
AuthenticationType auth_type = 3;
// CHAP Username used to authenticate the initiator
string chap_username = 4;
// CHAP password used to authenticate the initiator
string chap_secret = 5;
}
message ConnectTargetResponse {
// Intentionally empty
}
message GetTargetDisksRequest {
// Target portal whose disks will be queried
TargetPortal target_portal = 1;
// IQN of the iSCSI Target
string iqn = 2;
}
message GetTargetDisksResponse {
// List composed of disk ids (numbers) that are associated with the
// iSCSI target
repeated string diskIDs = 1;
}
message DisconnectTargetRequest {
// Target portal from which initiator will disconnect
TargetPortal target_portal = 1;
// IQN of the iSCSI Target
string iqn = 2;
}
message DisconnectTargetResponse {
// Intentionally empty
}
message SetMutualChapSecretRequest {
// the default CHAP secret that all initiators on this machine (node) use to
// authenticate the target on mutual CHAP authentication.
// Must be at least 12 bytes long for non-IPsec connections, at least one
// byte long for IPsec connections, and at most 16 bytes long.
string MutualChapSecret = 1;
}
message SetMutualChapSecretResponse {
// Intentionally empty
}
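A hedged sketch of the usual connect sequence against this Iscsi API (register the portal, discover IQNs, log in with CHAP, look up the backing disks). It assumes protoc-generated Go stubs and the conventional \\.\pipe\csi-proxy-iscsi-v1alpha2 pipe name; the portal address, IQN selection and credentials are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
	pb "github.com/kubernetes-csi/csi-proxy/client/api/iscsi/v1alpha2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPipe opens a gRPC connection over a Windows named pipe.
func dialPipe(pipe string) (*grpc.ClientConn, error) {
	return grpc.Dial(pipe,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return winio.DialPipeContext(ctx, addr)
		}))
}

func main() {
	conn, err := dialPipe(`\\.\pipe\csi-proxy-iscsi-v1alpha2`) // assumed pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	iscsi := pb.NewIscsiClient(conn)
	ctx := context.Background()

	portal := &pb.TargetPortal{TargetAddress: "192.168.0.10", TargetPort: 3260} // placeholder portal

	// Register the portal, then discover the IQNs it exposes.
	if _, err := iscsi.AddTargetPortal(ctx, &pb.AddTargetPortalRequest{TargetPortal: portal}); err != nil {
		log.Fatal(err)
	}
	disc, err := iscsi.DiscoverTargetPortal(ctx, &pb.DiscoverTargetPortalRequest{TargetPortal: portal})
	if err != nil || len(disc.Iqns) == 0 {
		log.Fatal("discovery failed: ", err)
	}
	iqn := disc.Iqns[0]

	// Log in with one-way CHAP, then look up the disks backing the session.
	_, err = iscsi.ConnectTarget(ctx, &pb.ConnectTargetRequest{
		TargetPortal: portal,
		Iqn:          iqn,
		AuthType:     pb.AuthenticationType_ONE_WAY_CHAP,
		ChapUsername: "initiator-user", // placeholder credentials
		ChapSecret:   "initiator-secret",
	})
	if err != nil {
		log.Fatal(err)
	}
	disks, err := iscsi.GetTargetDisks(ctx, &pb.GetTargetDisksRequest{TargetPortal: portal, Iqn: iqn})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("disk numbers for target:", disks.DiskIDs)
}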

View File

@ -1,58 +0,0 @@
syntax = "proto3";
package v1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/smb/v1";
service Smb {
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
}
message NewSmbGlobalMappingRequest {
// A remote SMB share to mount
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
// Optional local path to mount the SMB share on
string local_path = 2;
// Username credential associated with the share
string username = 3;
// Password credential associated with the share
string password = 4;
}
message NewSmbGlobalMappingResponse {
// Intentionally empty.
}
message RemoveSmbGlobalMappingRequest {
// A remote SMB share mapping to remove
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
}
message RemoveSmbGlobalMappingResponse {
// Intentionally empty.
}
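A minimal Go sketch of mapping and unmapping an SMB share through this API, assuming protoc-generated stubs and a \\.\pipe\csi-proxy-smb-v1 pipe name; server, share, local path and credentials are placeholders.

package main

import (
	"context"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
	pb "github.com/kubernetes-csi/csi-proxy/client/api/smb/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPipe opens a gRPC connection over a Windows named pipe.
func dialPipe(pipe string) (*grpc.ClientConn, error) {
	return grpc.Dial(pipe,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return winio.DialPipeContext(ctx, addr)
		}))
}

func main() {
	conn, err := dialPipe(`\\.\pipe\csi-proxy-smb-v1`) // assumed pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	smb := pb.NewSmbClient(conn)
	ctx := context.Background()

	remote := `\\fileserver.example.com\share1` // placeholder share

	// Map the remote share on the node, optionally onto a local staging path.
	_, err = smb.NewSmbGlobalMapping(ctx, &pb.NewSmbGlobalMappingRequest{
		RemotePath: remote,
		LocalPath:  `C:\var\lib\kubelet\plugins\kubernetes.io\csi\pv\pv-1\globalmount`,
		Username:   `DOMAIN\smb-user`, // placeholder credentials
		Password:   "smb-password",
	})
	if err != nil {
		log.Fatal(err)
	}

	// Drop the mapping again when the volume is unstaged.
	if _, err := smb.RemoveSmbGlobalMapping(ctx, &pb.RemoveSmbGlobalMappingRequest{RemotePath: remote}); err != nil {
		log.Fatal(err)
	}
}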

View File

@ -1,59 +0,0 @@
syntax = "proto3";
package v1alpha1;
service Smb {
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
}
message NewSmbGlobalMappingRequest {
// A remote SMB share to mount
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
// Optional local path to mount the SMB share on
string local_path = 2;
// Username credential associated with the share
string username = 3;
// Password credential associated with the share
string password = 4;
}
message NewSmbGlobalMappingResponse {
// Windows error code
// Success is represented as 0
string error = 1;
}
message RemoveSmbGlobalMappingRequest {
// A remote SMB share mapping to remove
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
}
message RemoveSmbGlobalMappingResponse {
// Windows error code
// Success is represented as 0
string error = 1;
}

View File

@ -1,59 +0,0 @@
syntax = "proto3";
package v1beta1;
service Smb {
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
}
message NewSmbGlobalMappingRequest {
// A remote SMB share to mount
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
// Optional local path to mount the SMB share on
string local_path = 2;
// Username credential associated with the share
string username = 3;
// Password credential associated with the share
string password = 4;
}
message NewSmbGlobalMappingResponse {
// Windows error code
// Success is represented as 0
string error = 1;
}
message RemoveSmbGlobalMappingRequest {
// A remote SMB share mapping to remove
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
}
message RemoveSmbGlobalMappingResponse {
// Windows error code
// Success is represented as 0
string error = 1;
}

View File

@ -1,58 +0,0 @@
syntax = "proto3";
package v1beta2;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/smb/v1beta2";
service Smb {
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
}
message NewSmbGlobalMappingRequest {
// A remote SMB share to mount
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
// Optional local path to mount the SMB share on
string local_path = 2;
// Username credential associated with the share
string username = 3;
// Password credential associated with the share
string password = 4;
}
message NewSmbGlobalMappingResponse {
// Intentionally empty.
}
message RemoveSmbGlobalMappingRequest {
// A remote SMB share mapping to remove
// All unicode characters allowed in SMB server name specifications are
// permitted except for restrictions below
//
// Restrictions:
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
// If not an IP address, share name has to be a valid DNS name.
// UNC specifications to local paths or prefix: \\?\ is not allowed.
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
string remote_path = 1;
}
message RemoveSmbGlobalMappingResponse {
// Intentionally empty.
}

View File

@ -1,93 +0,0 @@
syntax = "proto3";
package v1alpha1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/system/v1alpha1";
service System {
// GetBIOSSerialNumber returns the device's serial number
rpc GetBIOSSerialNumber(GetBIOSSerialNumberRequest)
returns (GetBIOSSerialNumberResponse) {}
// StartService starts a Windows service
// NOTE: This method affects global node state and should only be used
// with consideration to other CSI drivers that run concurrently.
rpc StartService(StartServiceRequest) returns (StartServiceResponse) {}
// StopService stops a Windows service
// NOTE: This method affects global node state and should only be used
// with consideration to other CSI drivers that run concurrently.
rpc StopService(StopServiceRequest) returns (StopServiceResponse) {}
// GetService queries a Windows service state
rpc GetService(GetServiceRequest) returns (GetServiceResponse) {}
}
message GetBIOSSerialNumberRequest {
// Intentionally empty
}
message GetBIOSSerialNumberResponse {
// Serial number
string serial_number = 1;
}
message StartServiceRequest {
// Service name (as listed in System\CCS\Services keys)
string name = 1;
}
message StartServiceResponse {
// Intentionally empty
}
message StopServiceRequest {
// Service name (as listed in System\CCS\Services keys)
string name = 1;
// Forces stopping of services that have dependent services
bool force = 2;
}
message StopServiceResponse {
// Intentionally empty
}
// https://docs.microsoft.com/en-us/windows/win32/api/winsvc/ns-winsvc-service_status#members
enum ServiceStatus {
UNKNOWN = 0;
STOPPED = 1;
START_PENDING = 2;
STOP_PENDING = 3;
RUNNING = 4;
CONTINUE_PENDING = 5;
PAUSE_PENDING = 6;
PAUSED = 7;
}
// https://docs.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-changeserviceconfiga
enum StartType {
BOOT = 0;
SYSTEM = 1;
AUTOMATIC = 2;
MANUAL = 3;
DISABLED = 4;
}
message GetServiceRequest {
// Service name (as listed in System\CCS\Services keys)
string name = 1;
}
message GetServiceResponse {
// Service display name
string display_name = 1;
// Service start type.
// Used to control whether a service will start on boot, and if so on which
// boot phase.
StartType start_type = 2;
// Service status, e.g. stopped, running, paused
ServiceStatus status = 3;
}
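A small Go sketch against this System API, for example making sure the Windows iSCSI initiator service (MSiSCSI) is running before issuing iSCSI calls and reading the BIOS serial number as a node identifier. The stub names and the \\.\pipe\csi-proxy-system-v1alpha1 pipe are assumptions, not taken from this repository.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
	pb "github.com/kubernetes-csi/csi-proxy/client/api/system/v1alpha1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPipe opens a gRPC connection over a Windows named pipe.
func dialPipe(pipe string) (*grpc.ClientConn, error) {
	return grpc.Dial(pipe,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return winio.DialPipeContext(ctx, addr)
		}))
}

func main() {
	conn, err := dialPipe(`\\.\pipe\csi-proxy-system-v1alpha1`) // assumed pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	sys := pb.NewSystemClient(conn)
	ctx := context.Background()

	const svc = "MSiSCSI" // Windows iSCSI initiator service

	// Query the current state and start the service if it is not running.
	state, err := sys.GetService(ctx, &pb.GetServiceRequest{Name: svc})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s (%s): %s\n", svc, state.DisplayName, state.Status)
	if state.Status != pb.ServiceStatus_RUNNING {
		if _, err := sys.StartService(ctx, &pb.StartServiceRequest{Name: svc}); err != nil {
			log.Fatal(err)
		}
	}

	// The BIOS serial number is handy as a stable node identifier.
	sn, err := sys.GetBIOSSerialNumber(ctx, &pb.GetBIOSSerialNumberRequest{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("BIOS serial number:", sn.SerialNumber)
}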

View File

@ -1,143 +0,0 @@
syntax = "proto3";
package v1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1";
service Volume {
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for all volumes from a
// given disk number and partition number (optional)
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
// MountVolume mounts the volume at the requested global staging path.
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
// UnmountVolume flushes data cache to disk and removes the global staging path.
rpc UnmountVolume(UnmountVolumeRequest) returns (UnmountVolumeResponse) {}
// IsVolumeFormatted checks if a volume is formatted.
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
// FormatVolume formats a volume with NTFS.
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
// ResizeVolume performs resizing of the partition and file system for a block based volume.
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
// GetVolumeStats gathers total bytes and used bytes for a volume.
rpc GetVolumeStats(GetVolumeStatsRequest) returns (GetVolumeStatsResponse) {}
// GetDiskNumberFromVolumeID gets the disk number of the disk where the volume is located.
rpc GetDiskNumberFromVolumeID(GetDiskNumberFromVolumeIDRequest) returns (GetDiskNumberFromVolumeIDResponse ) {}
// GetVolumeIDFromTargetPath gets the volume id for a given target path.
rpc GetVolumeIDFromTargetPath(GetVolumeIDFromTargetPathRequest) returns (GetVolumeIDFromTargetPathResponse) {}
// WriteVolumeCache writes the volume cache to disk.
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
}
message ListVolumesOnDiskRequest {
// Disk device number of the disk to query for volumes.
uint32 disk_number = 1;
// The partition number (optional), by default it uses the first partition of the disk.
uint32 partition_number = 2;
}
message ListVolumesOnDiskResponse {
// Volume device IDs of volumes on the specified disk.
repeated string volume_ids = 1;
}
message MountVolumeRequest {
// Volume device ID of the volume to mount.
string volume_id = 1;
// Path in the host's file system where the volume needs to be mounted.
string target_path = 2;
}
message MountVolumeResponse {
// Intentionally empty.
}
message UnmountVolumeRequest {
// Volume device ID of the volume to dismount.
string volume_id = 1;
// Path where the volume has been mounted.
string target_path = 2;
}
message UnmountVolumeResponse {
// Intentionally empty.
}
message IsVolumeFormattedRequest {
// Volume device ID of the volume to check.
string volume_id = 1;
}
message IsVolumeFormattedResponse {
// Is the volume formatted with NTFS.
bool formatted = 1;
}
message FormatVolumeRequest {
// Volume device ID of the volume to format.
string volume_id = 1;
}
message FormatVolumeResponse {
// Intentionally empty.
}
message ResizeVolumeRequest {
// Volume device ID of the volume to resize.
string volume_id = 1;
// New size in bytes of the volume.
int64 size_bytes = 2;
}
message ResizeVolumeResponse {
// Intentionally empty.
}
message GetVolumeStatsRequest{
// Volume device Id of the volume to get the stats for.
string volume_id = 1;
}
message GetVolumeStatsResponse{
// Total bytes
int64 total_bytes = 1;
// Used bytes
int64 used_bytes = 2;
}
message GetDiskNumberFromVolumeIDRequest {
// Volume device ID of the volume to get the disk number for.
string volume_id = 1;
}
message GetDiskNumberFromVolumeIDResponse {
// Corresponding disk number.
uint32 disk_number = 1;
}
message GetVolumeIDFromTargetPathRequest {
// The target path.
string target_path = 1;
}
message GetVolumeIDFromTargetPathResponse {
// The volume device ID.
string volume_id = 1;
}
message WriteVolumeCacheRequest {
// Volume device ID of the volume to flush the cache.
string volume_id = 1;
}
message WriteVolumeCacheResponse {
// Intentionally empty.
}
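A hedged Go sketch of the typical block-volume staging flow on top of this Volume API: find the volume on an attached disk, format it on first use, mount it at the staging path, then read usage stats. The stub names, the \\.\pipe\csi-proxy-volume-v1 pipe, the disk number and the paths are all illustrative assumptions.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
	pb "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPipe opens a gRPC connection over a Windows named pipe.
func dialPipe(pipe string) (*grpc.ClientConn, error) {
	return grpc.Dial(pipe,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return winio.DialPipeContext(ctx, addr)
		}))
}

func main() {
	conn, err := dialPipe(`\\.\pipe\csi-proxy-volume-v1`) // assumed pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	vol := pb.NewVolumeClient(conn)
	ctx := context.Background()

	const diskNumber = 1 // placeholder: disk number obtained from the iSCSI/disk APIs
	staging := `C:\var\lib\kubelet\plugins\kubernetes.io\csi\pv\pv-1\globalmount`

	// Find the volume on the attached disk.
	vols, err := vol.ListVolumesOnDisk(ctx, &pb.ListVolumesOnDiskRequest{DiskNumber: diskNumber})
	if err != nil || len(vols.VolumeIds) == 0 {
		log.Fatal("no volume found: ", err)
	}
	volumeID := vols.VolumeIds[0]

	// Format on first use, then mount at the staging path.
	formatted, err := vol.IsVolumeFormatted(ctx, &pb.IsVolumeFormattedRequest{VolumeId: volumeID})
	if err != nil {
		log.Fatal(err)
	}
	if !formatted.Formatted {
		if _, err := vol.FormatVolume(ctx, &pb.FormatVolumeRequest{VolumeId: volumeID}); err != nil {
			log.Fatal(err)
		}
	}
	if _, err := vol.MountVolume(ctx, &pb.MountVolumeRequest{VolumeId: volumeID, TargetPath: staging}); err != nil {
		log.Fatal(err)
	}

	stats, err := vol.GetVolumeStats(ctx, &pb.GetVolumeStatsRequest{VolumeId: volumeID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("volume %s: %d/%d bytes used\n", volumeID, stats.UsedBytes, stats.TotalBytes)
}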

View File

@ -1,69 +0,0 @@
syntax = "proto3";
package v1alpha1;
service Volume {
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for
// all volumes on a Disk device
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
// MountVolume mounts the volume at the requested global staging path
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
// DismountVolume gracefully dismounts a volume
rpc DismountVolume(DismountVolumeRequest) returns (DismountVolumeResponse) {}
// IsVolumeFormatted checks if a volume is formatted with NTFS
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
// FormatVolume formats a volume with the provided file system
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
// ResizeVolume performs resizing of the partition and file system for a block based volume
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
}
message ListVolumesOnDiskRequest {
// Disk device ID of the disk to query for volumes
string disk_id = 1;
}
message ListVolumesOnDiskResponse {
// Volume device IDs of volumes on the specified disk
repeated string volume_ids = 1;
}
message MountVolumeRequest {
// Volume device ID of the volume to mount
string volume_id = 1;
// Path in the host's file system where the volume needs to be mounted
string path = 2;
}
message MountVolumeResponse {
// Intentionally empty
}
message DismountVolumeRequest {
// Volume device ID of the volume to dismount
string volume_id = 1;
// Path where the volume has been mounted.
string path = 2;
}
message DismountVolumeResponse {
// Intentionally empty
}
message IsVolumeFormattedRequest {
// Volume device ID of the volume to check
string volume_id = 1;
}
message IsVolumeFormattedResponse {
// Is the volume formatted with NTFS
bool formatted = 1;
}
message FormatVolumeRequest {
// Volume device ID of the volume to format
string volume_id = 1;
}
message FormatVolumeResponse {
// Intentionally empty
}
message ResizeVolumeRequest {
// Volume device ID of the volume to resize
string volume_id = 1;
// New size of the volume
int64 size = 2;
}
message ResizeVolumeResponse {
// Intentionally empty
}

View File

@ -1,121 +0,0 @@
syntax = "proto3";
package v1beta1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1beta1";
service Volume {
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for
// all volumes on a Disk device
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
// MountVolume mounts the volume at the requested global staging path
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
// DismountVolume gracefully dismounts a volume
rpc DismountVolume(DismountVolumeRequest) returns (DismountVolumeResponse) {}
// IsVolumeFormatted checks if a volume is formatted with NTFS
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
// FormatVolume formats a volume with the provided file system
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
// ResizeVolume performs resizing of the partition and file system for a block based volume
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
// VolumeStats gathers DiskSize, VolumeSize and VolumeUsedSize for a volume
rpc VolumeStats(VolumeStatsRequest) returns (VolumeStatsResponse) {}
// GetVolumeDiskNumber gets the disk number of the disk where the volume is located
rpc GetVolumeDiskNumber(VolumeDiskNumberRequest) returns (VolumeDiskNumberResponse) {}
// GetVolumeIDFromMount gets the volume id for a given mount
rpc GetVolumeIDFromMount(VolumeIDFromMountRequest) returns (VolumeIDFromMountResponse) {}
}
message ListVolumesOnDiskRequest {
// Disk device ID of the disk to query for volumes
string disk_id = 1;
}
message ListVolumesOnDiskResponse {
// Volume device IDs of volumes on the specified disk
repeated string volume_ids = 1;
}
message MountVolumeRequest {
// Volume device ID of the volume to mount
string volume_id = 1;
// Path in the host's file system where the volume needs to be mounted
string path = 2;
}
message MountVolumeResponse {
// Intentionally empty
}
message DismountVolumeRequest {
// Volume device ID of the volume to dismount
string volume_id = 1;
// Path where the volume has been mounted.
string path = 2;
}
message DismountVolumeResponse {
// Intentionally empty
}
message IsVolumeFormattedRequest {
// Volume device ID of the volume to check
string volume_id = 1;
}
message IsVolumeFormattedResponse {
// Is the volume formatted with NTFS
bool formatted = 1;
}
message FormatVolumeRequest {
// Volume device ID of the volume to format
string volume_id = 1;
}
message FormatVolumeResponse {
// Intentionally empty
}
message ResizeVolumeRequest {
// Volume device ID of the volume to resize
string volume_id = 1;
// New size of the volume
int64 size = 2;
}
message ResizeVolumeResponse {
// Intentionally empty
}
message VolumeStatsRequest{
// Volume device Id of the volume to get the stats for
string volume_id = 1;
}
message VolumeStatsResponse{
// Capacity of the volume
int64 volumeSize = 1;
// Used bytes
int64 volumeUsedSize = 2;
}
message VolumeDiskNumberRequest{
// Volume device Id of the volume to get the disk number for
string volume_id = 1;
}
message VolumeDiskNumberResponse{
// Corresponding disk number
int64 diskNumber = 1;
}
message VolumeIDFromMountRequest {
// The mount path to look up
string mount = 1;
}
message VolumeIDFromMountResponse {
// Volume device ID of the volume backing the mount
string volume_id = 1;
}

View File

@ -1,132 +0,0 @@
syntax = "proto3";
package v1beta2;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1beta2";
service Volume {
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for
// all volumes on a Disk device
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
// MountVolume mounts the volume at the requested global staging path
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
// DismountVolume gracefully dismounts a volume
rpc DismountVolume(DismountVolumeRequest) returns (DismountVolumeResponse) {}
// IsVolumeFormatted checks if a volume is formatted with NTFS
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
// FormatVolume formats a volume with the provided file system
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
// ResizeVolume performs resizing of the partition and file system for a block based volume
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
// VolumeStats gathers DiskSize, VolumeSize and VolumeUsedSize for a volume
rpc VolumeStats(VolumeStatsRequest) returns (VolumeStatsResponse) {}
// GetVolumeDiskNumber gets the disk number of the disk where the volume is located
rpc GetVolumeDiskNumber(VolumeDiskNumberRequest) returns (VolumeDiskNumberResponse) {}
// GetVolumeIDFromMount gets the volume id for a given mount
rpc GetVolumeIDFromMount(VolumeIDFromMountRequest) returns (VolumeIDFromMountResponse) {}
// WriteVolumeCache writes the volume cache to disk
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
}
message ListVolumesOnDiskRequest {
// Disk device ID of the disk to query for volumes
string disk_id = 1;
}
message ListVolumesOnDiskResponse {
// Volume device IDs of volumes on the specified disk
repeated string volume_ids = 1;
}
message MountVolumeRequest {
// Volume device ID of the volume to mount
string volume_id = 1;
// Path in the host's file system where the volume needs to be mounted
string path = 2;
}
message MountVolumeResponse {
// Intentionally empty
}
message DismountVolumeRequest {
// Volume device ID of the volume to dismount
string volume_id = 1;
// Path where the volume has been mounted.
string path = 2;
}
message DismountVolumeResponse {
// Intentionally empty
}
message IsVolumeFormattedRequest {
// Volume device ID of the volume to check
string volume_id = 1;
}
message IsVolumeFormattedResponse {
// Is the volume formatted with NTFS
bool formatted = 1;
}
message FormatVolumeRequest {
// Volume device ID of the volume to format
string volume_id = 1;
}
message FormatVolumeResponse {
// Intentionally empty
}
message ResizeVolumeRequest {
// Volume device ID of the volume to resize
string volume_id = 1;
// New size of the volume
int64 size = 2;
}
message ResizeVolumeResponse {
// Intentionally empty
}
message VolumeStatsRequest{
// Volume device Id of the volume to get the stats for
string volume_id = 1;
}
message VolumeStatsResponse{
// Capacity of the volume
int64 volumeSize = 1;
// Used bytes
int64 volumeUsedSize = 2;
}
message VolumeDiskNumberRequest{
// Volume device Id of the volume to get the disk number for
string volume_id = 1;
}
message VolumeDiskNumberResponse{
// Corresponding disk number
int64 diskNumber = 1;
}
message VolumeIDFromMountRequest {
// The mount path to look up
string mount = 1;
}
message VolumeIDFromMountResponse {
// Volume device ID of the volume backing the mount
string volume_id = 1;
}
message WriteVolumeCacheRequest {
// Volume device ID of the volume to flush the cache
string volume_id = 1;
}
message WriteVolumeCacheResponse {
// Intentionally empty
}

View File

@ -1,143 +0,0 @@
syntax = "proto3";
package v1beta3;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1beta3";
service Volume {
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for all volumes from a
// given disk number and partition number (optional)
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
// MountVolume mounts the volume at the requested global staging path.
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
// UnmountVolume flushes data cache to disk and removes the global staging path.
rpc UnmountVolume(UnmountVolumeRequest) returns (UnmountVolumeResponse) {}
// IsVolumeFormatted checks if a volume is formatted.
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
// FormatVolume formats a volume with NTFS.
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
// ResizeVolume performs resizing of the partition and file system for a block based volume.
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
// GetVolumeStats gathers total bytes and used bytes for a volume.
rpc GetVolumeStats(GetVolumeStatsRequest) returns (GetVolumeStatsResponse) {}
// GetDiskNumberFromVolumeID gets the disk number of the disk where the volume is located.
rpc GetDiskNumberFromVolumeID(GetDiskNumberFromVolumeIDRequest) returns (GetDiskNumberFromVolumeIDResponse ) {}
// GetVolumeIDFromTargetPath gets the volume id for a given target path.
rpc GetVolumeIDFromTargetPath(GetVolumeIDFromTargetPathRequest) returns (GetVolumeIDFromTargetPathResponse) {}
// WriteVolumeCache writes the volume cache to disk.
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
}
message ListVolumesOnDiskRequest {
// Disk device number of the disk to query for volumes.
uint32 disk_number = 1;
// The partition number (optional), by default it uses the first partition of the disk.
uint32 partition_number = 2;
}
message ListVolumesOnDiskResponse {
// Volume device IDs of volumes on the specified disk.
repeated string volume_ids = 1;
}
message MountVolumeRequest {
// Volume device ID of the volume to mount.
string volume_id = 1;
// Path in the host's file system where the volume needs to be mounted.
string target_path = 2;
}
message MountVolumeResponse {
// Intentionally empty.
}
message UnmountVolumeRequest {
// Volume device ID of the volume to dismount.
string volume_id = 1;
// Path where the volume has been mounted.
string target_path = 2;
}
message UnmountVolumeResponse {
// Intentionally empty.
}
message IsVolumeFormattedRequest {
// Volume device ID of the volume to check.
string volume_id = 1;
}
message IsVolumeFormattedResponse {
// Is the volume formatted with NTFS.
bool formatted = 1;
}
message FormatVolumeRequest {
// Volume device ID of the volume to format.
string volume_id = 1;
}
message FormatVolumeResponse {
// Intentionally empty.
}
message ResizeVolumeRequest {
// Volume device ID of the volume to resize.
string volume_id = 1;
// New size in bytes of the volume.
int64 size_bytes = 2;
}
message ResizeVolumeResponse {
// Intentionally empty.
}
message GetVolumeStatsRequest{
// Volume device Id of the volume to get the stats for.
string volume_id = 1;
}
message GetVolumeStatsResponse{
// Total bytes
int64 total_bytes = 1;
// Used bytes
int64 used_bytes = 2;
}
message GetDiskNumberFromVolumeIDRequest {
// Volume device ID of the volume to get the disk number for.
string volume_id = 1;
}
message GetDiskNumberFromVolumeIDResponse {
// Corresponding disk number.
uint32 disk_number = 1;
}
message GetVolumeIDFromTargetPathRequest {
// The target path.
string target_path = 1;
}
message GetVolumeIDFromTargetPathResponse {
// The volume device ID.
string volume_id = 1;
}
message WriteVolumeCacheRequest {
// Volume device ID of the volume to flush the cache.
string volume_id = 1;
}
message WriteVolumeCacheResponse {
// Intentionally empty.
}

View File

@ -1,158 +0,0 @@
syntax = "proto3";
package v2alpha1;
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v2alpha1";
service Volume {
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for all volumes from a
// given disk number and partition number (optional)
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
// MountVolume mounts the volume at the requested global staging path.
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
// UnmountVolume flushes data cache to disk and removes the global staging path.
rpc UnmountVolume(UnmountVolumeRequest) returns (UnmountVolumeResponse) {}
// IsVolumeFormatted checks if a volume is formatted.
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
// FormatVolume formats a volume with NTFS.
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
// ResizeVolume performs resizing of the partition and file system for a block based volume.
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
// GetVolumeStats gathers total bytes and used bytes for a volume.
rpc GetVolumeStats(GetVolumeStatsRequest) returns (GetVolumeStatsResponse) {}
// GetDiskNumberFromVolumeID gets the disk number of the disk where the volume is located.
rpc GetDiskNumberFromVolumeID(GetDiskNumberFromVolumeIDRequest) returns (GetDiskNumberFromVolumeIDResponse ) {}
// GetVolumeIDFromTargetPath gets the volume id for a given target path.
rpc GetVolumeIDFromTargetPath(GetVolumeIDFromTargetPathRequest) returns (GetVolumeIDFromTargetPathResponse) {}
// GetClosestVolumeIDFromTargetPath gets the closest volume id for a given target path
// by following symlinks and moving up in the filesystem, if after moving up in the filesystem
// we get to a DriveLetter then the volume corresponding to this drive letter is returned instead.
rpc GetClosestVolumeIDFromTargetPath(GetClosestVolumeIDFromTargetPathRequest) returns (GetClosestVolumeIDFromTargetPathResponse) {}
// WriteVolumeCache writes the volume cache to disk.
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
}
message ListVolumesOnDiskRequest {
// Disk device number of the disk to query for volumes.
uint32 disk_number = 1;
// The partition number (optional), by default it uses the first partition of the disk.
uint32 partition_number = 2;
}
message ListVolumesOnDiskResponse {
// Volume device IDs of volumes on the specified disk.
repeated string volume_ids = 1;
}
message MountVolumeRequest {
// Volume device ID of the volume to mount.
string volume_id = 1;
// Path in the host's file system where the volume needs to be mounted.
string target_path = 2;
}
message MountVolumeResponse {
// Intentionally empty.
}
message UnmountVolumeRequest {
// Volume device ID of the volume to dismount.
string volume_id = 1;
// Path where the volume has been mounted.
string target_path = 2;
}
message UnmountVolumeResponse {
// Intentionally empty.
}
message IsVolumeFormattedRequest {
// Volume device ID of the volume to check.
string volume_id = 1;
}
message IsVolumeFormattedResponse {
// Is the volume formatted with NTFS.
bool formatted = 1;
}
message FormatVolumeRequest {
// Volume device ID of the volume to format.
string volume_id = 1;
}
message FormatVolumeResponse {
// Intentionally empty.
}
message ResizeVolumeRequest {
// Volume device ID of the volume to resize.
string volume_id = 1;
// New size in bytes of the volume.
int64 size_bytes = 2;
}
message ResizeVolumeResponse {
// Intentionally empty.
}
message GetVolumeStatsRequest{
// Volume device Id of the volume to get the stats for.
string volume_id = 1;
}
message GetVolumeStatsResponse{
// Total bytes
int64 total_bytes = 1;
// Used bytes
int64 used_bytes = 2;
}
message GetDiskNumberFromVolumeIDRequest {
// Volume device ID of the volume to get the disk number for.
string volume_id = 1;
}
message GetDiskNumberFromVolumeIDResponse {
// Corresponding disk number.
uint32 disk_number = 1;
}
message GetVolumeIDFromTargetPathRequest {
// The target path.
string target_path = 1;
}
message GetVolumeIDFromTargetPathResponse {
// The volume device ID.
string volume_id = 1;
}
message GetClosestVolumeIDFromTargetPathRequest {
// The target path.
string target_path = 1;
}
message GetClosestVolumeIDFromTargetPathResponse {
// The volume device ID.
string volume_id = 1;
}
message WriteVolumeCacheRequest {
// Volume device ID of the volume to flush the cache.
string volume_id = 1;
}
message WriteVolumeCacheResponse {
// Intentionally empty.
}
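Finally, a short sketch of the reverse lookup this newer revision adds, resolving a kubelet target path back to its volume (useful for NodeGetVolumeStats and teardown), under the same stub and pipe-name assumptions; the \\.\pipe\csi-proxy-volume-v2alpha1 pipe and the target path are placeholders.

package main

import (
	"context"
	"fmt"
	"log"
	"net"

	"github.com/Microsoft/go-winio"
	pb "github.com/kubernetes-csi/csi-proxy/client/api/volume/v2alpha1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPipe opens a gRPC connection over a Windows named pipe.
func dialPipe(pipe string) (*grpc.ClientConn, error) {
	return grpc.Dial(pipe,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
			return winio.DialPipeContext(ctx, addr)
		}))
}

func main() {
	conn, err := dialPipe(`\\.\pipe\csi-proxy-volume-v2alpha1`) // assumed pipe name
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	vol := pb.NewVolumeClient(conn)
	ctx := context.Background()

	target := `C:\var\lib\kubelet\pods\pod-1\volumes\kubernetes.io~csi\pvc-1\mount` // placeholder

	// Resolve the pod's target path back to the underlying volume, following
	// symlinks and moving up the tree if the path itself is not a mount point.
	closest, err := vol.GetClosestVolumeIDFromTargetPath(ctx,
		&pb.GetClosestVolumeIDFromTargetPathRequest{TargetPath: target})
	if err != nil {
		log.Fatal(err)
	}
	volumeID := closest.VolumeId

	stats, err := vol.GetVolumeStats(ctx, &pb.GetVolumeStatsRequest{VolumeId: volumeID})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s -> %s (%d/%d bytes used)\n", target, volumeID, stats.UsedBytes, stats.TotalBytes)

	// On teardown, flush the volume cache before the volume is unmounted and detached.
	if _, err := vol.WriteVolumeCache(ctx, &pb.WriteVolumeCacheRequest{VolumeId: volumeID}); err != nil {
		log.Fatal(err)
	}
}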

27
docker/iscsiadm Executable file → Normal file
View File

@ -1,28 +1,5 @@
 #!/bin/bash
 
-: "${ISCSIADM_HOST_STRATEGY:=chroot}"
-: "${ISCSIADM_HOST_PATH:=iscsiadm}"
-
-echoerr() { printf "%s\n" "$*" >&2; }
-
-case ${ISCSIADM_HOST_STRATEGY} in
-  chroot)
-    # https://engineering.docker.com/2019/07/road-to-containing-iscsi/
-    chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" ${ISCSIADM_HOST_PATH} "${@:1}"
-    ;;
-  nsenter)
-    # https://github.com/siderolabs/extensions/issues/38#issuecomment-1125403043
-    iscsid_pid=$(pgrep --exact --oldest iscsid)
-    if [[ "${iscsid_pid}x" == "x" ]]; then
-      echoerr "failed to find iscsid pid for nsenter"
-      exit 1
-    fi
-    nsenter --mount="/proc/${iscsid_pid}/ns/mnt" --net="/proc/${iscsid_pid}/ns/net" -- ${ISCSIADM_HOST_PATH} "${@:1}"
-    ;;
-  *)
-    echoerr "invalid ISCSIADM_HOST_STRATEGY: ${ISCSIADM_HOST_STRATEGY}"
-    exit 1
-    ;;
-esac
+# https://engineering.docker.com/2019/07/road-to-containing-iscsi/
+chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin" iscsiadm "${@:1}"
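
For illustration, a hypothetical invocation of the master-branch wrapper above; the command, values, and invocation path are examples and not taken from this repository. The behaviour is driven entirely by the two environment variables:

# default strategy: chroot into /host and run the host's iscsiadm
./docker/iscsiadm -m session

# hypothetical override: enter the namespaces of the running iscsid instead,
# e.g. on hosts where chrooting into /host is not workable
ISCSIADM_HOST_STRATEGY=nsenter ISCSIADM_HOST_PATH=iscsiadm ./docker/iscsiadm -m session

# any other strategy value fails fast with an error on stderr
ISCSIADM_HOST_STRATEGY=bogus ./docker/iscsiadm -m session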

View File

@ -1,36 +0,0 @@
#!/bin/bash

set -e
set -x

PLATFORM_TYPE=${1}

if [[ "${PLATFORM_TYPE}" == "build" ]]; then
  PLATFORM=$BUILDPLATFORM
else
  PLATFORM=$TARGETPLATFORM
fi

if [[ "x${PLATFORM}" == "x" ]]; then
  PLATFORM="linux/amd64"
fi

# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then
  export PLATFORM_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
  export PLATFORM_ARCH="arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
  export PLATFORM_ARCH="armhf"
else
  echo "unsupported/unknown kopia PLATFORM ${PLATFORM}"
  exit 0
fi

echo "I am installing kopia $KOPIA_VERSION"

export DEB_FILE="kopia.deb"
wget -O "${DEB_FILE}" "https://github.com/kopia/kopia/releases/download/v${KOPIA_VERSION}/kopia_${KOPIA_VERSION}_linux_${PLATFORM_ARCH}.deb"
dpkg -i "${DEB_FILE}"
rm "${DEB_FILE}"

docker/mount Executable file → Normal file
View File

@ -1,37 +1,7 @@
 #!/bin/bash
 
-container_supported_filesystems=(
-  "ext2"
-  "ext3"
-  "ext4"
-  "ext4dev"
-  "btrfs"
-  "xfs"
-  "vfat"
-  "nfs"
-  "nfs3"
-  "nfs4"
-  "cifs"
-  "smb"
-  "smb3"
-  "bind"
-)
-
-while getopts "t:" opt; do
-  case "$opt" in
-    t)
-      if [[ "x${USE_HOST_MOUNT_TOOLS}" == "x" ]]; then
-        [[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
-        [[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
-        [[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
-        #(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
-      fi
-      ;;
-  esac
-done
-
-if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]]; then
-  chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" mount "${@:1}"
+if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then
+  chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}"
 else
-  /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" mount "${@:1}"
+  /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}"
 fi
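
A minimal sketch of how the master-branch wrapper above picks host versus container mount tools; the devices, datasets, and mount points are made-up examples.

# ext4 is in container_supported_filesystems, so the in-container mount is used
./docker/mount -t ext4 /dev/sdb1 /mnt/data

# zfs (like lustre and onedata) is routed through the host via chroot /host
./docker/mount -t zfs tank/vol1 /mnt/vol1

# presetting USE_HOST_MOUNT_TOOLS skips the filesystem-type detection entirely
USE_HOST_MOUNT_TOOLS=1 ./docker/mount -t ext4 /dev/sdb1 /mnt/data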

docker/multipath Executable file → Normal file
View File

@ -1,3 +1,3 @@
 #!/bin/bash
-chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" multipath "${@:1}"
+chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/sbin:/usr/bin" multipath "${@:1}"

docker/node-installer.sh Executable file → Normal file
View File

@ -15,17 +15,12 @@ if [[ "x${PLATFORM}" == "x" ]]; then
PLATFORM="linux/amd64" PLATFORM="linux/amd64"
fi fi
# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then if [ "$PLATFORM" = "linux/amd64" ]; then
export NODE_DISTRO="linux-x64" export NODE_DISTRO="linux-x64"
elif [ "$PLATFORM" = "linux/arm64" ]; then elif [ "$PLATFORM" = "linux/arm64" ]; then
export NODE_DISTRO="linux-arm64" export NODE_DISTRO="linux-arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then elif [ "$PLATFORM" = "linux/arm/v7" ]; then
export NODE_DISTRO="linux-armv7l" export NODE_DISTRO="linux-armv7l"
elif [ "$PLATFORM" = "linux/s390x" ]; then
export NODE_DISTRO="linux-s390x"
elif [ "$PLATFORM" = "linux/ppc64le" ]; then
export NODE_DISTRO="linux-ppc64le"
else else
echo "unsupported/unknown PLATFORM ${PLATFORM}" echo "unsupported/unknown PLATFORM ${PLATFORM}"
exit 1 exit 1
@ -33,11 +28,8 @@ fi
echo "I am installing node $NODE_VERSION $NODE_DISTRO" echo "I am installing node $NODE_VERSION $NODE_DISTRO"
if [[ "x${NODE_TARGET_DIR}" == "x" ]]; then
NODE_TARGET_DIR="/usr/local/lib/nodejs"
fi
wget https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz >/dev/null 2>&1 wget https://nodejs.org/dist/${NODE_VERSION}/node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz >/dev/null 2>&1
mkdir -p ${NODE_TARGET_DIR} mkdir -p /usr/local/lib/nodejs
tar -xJf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C ${NODE_TARGET_DIR} --strip-components=1 tar -xJf node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz -C /usr/local/lib/nodejs --strip-components=1
rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz rm node-${NODE_VERSION}-${NODE_DISTRO}.tar.xz
rm -rf /var/lib/apt/lists/*
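
For illustration, a hypothetical standalone run of the master-branch installer; the version and directory below are example values only. NODE_TARGET_DIR is optional on master (defaulting to /usr/local/lib/nodejs), while v1.0.0 hard-codes that path.

# example values; NODE_VERSION includes the leading "v" so it matches the nodejs.org dist URL used above
NODE_VERSION=v16.20.2 NODE_TARGET_DIR=/opt/nodejs ./docker/node-installer.sh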

View File

@ -0,0 +1,23 @@
#!/bin/bash

set -e
set -x

PLATFORM=$TARGETPLATFORM

if [[ "x${PLATFORM}" == "x" ]]; then
  PLATFORM="linux/amd64"
fi

if [ "$PLATFORM" = "linux/amd64" ]; then
  export NODE_ARCH="x64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
  export NODE_ARCH="arm64"
elif [ "$PLATFORM" = "linux/arm/v7" ]; then
  export NODE_ARCH="armv7l"
else
  echo "unsupported/unknown PLATFORM ${PLATFORM}"
  exit 1
fi

npm install --target_arch="${NODE_ARCH}"

View File

@ -1,40 +0,0 @@
#!/bin/bash

set -e
set -x

if [[ -z "${OBJECTIVEFS_DOWNLOAD_ID}" ]]; then
  echo 'missing OBJECTIVEFS_DOWNLOAD_ID, moving on'
  exit 0
fi

PLATFORM_TYPE=${1}

if [[ "${PLATFORM_TYPE}" == "build" ]]; then
  PLATFORM=$BUILDPLATFORM
else
  PLATFORM=$TARGETPLATFORM
fi

if [[ "x${PLATFORM}" == "x" ]]; then
  PLATFORM="linux/amd64"
fi

# these come from the --platform option of buildx, indirectly from DOCKER_BUILD_PLATFORM in main.yaml
if [ "$PLATFORM" = "linux/amd64" ]; then
  export OBJECTIVEFS_ARCH="amd64"
elif [ "$PLATFORM" = "linux/arm64" ]; then
  export OBJECTIVEFS_ARCH="arm64"
else
  echo "unsupported/unknown PLATFORM ${PLATFORM}"
  exit 0
fi

export DEB_FILE="objectivefs_${OBJECTIVEFS_VERSION}_${OBJECTIVEFS_ARCH}.deb"

echo "I am installing objectivefs $OBJECTIVEFS_VERSION"

wget "https://objectivefs.com/user/download/${OBJECTIVEFS_DOWNLOAD_ID}/${DEB_FILE}"
dpkg -i "${DEB_FILE}"
rm "${DEB_FILE}"

View File

@ -1,3 +0,0 @@
#!/bin/bash
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" oneclient "${@:1}"

Some files were not shown because too many files have changed in this diff.