commit
8cec5cb3c5

@ -1,9 +1,9 @@
chart
dev
examples
contrib
node_modules
Dockerfile*
TODO.md
.git
/ci
**

!/bin
!/csi_proto
!/csi_proxy_proto
!/docker
!/LICENSE
!/package*.json
!/src

@ -0,0 +1,44 @@
#!/bin/bash

set -e

echo "$DOCKER_PASSWORD" | docker login docker.io -u "$DOCKER_USERNAME" --password-stdin
echo "$GHCR_PASSWORD" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin

export DOCKER_ORG="democraticcsi"
export DOCKER_PROJECT="democratic-csi"
export DOCKER_REPO="docker.io/${DOCKER_ORG}/${DOCKER_PROJECT}"

export GHCR_ORG="democratic-csi"
export GHCR_PROJECT="democratic-csi"
export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}"

export MANIFEST_NAME="democratic-csi-combined:${IMAGE_TAG}"

if [[ -n "${IMAGE_TAG}" ]]; then
  # create local manifest to work with
  buildah manifest rm "${MANIFEST_NAME}" || true
  buildah manifest create "${MANIFEST_NAME}"

  # add all the existing linux data to the manifest
  buildah manifest add "${MANIFEST_NAME}" --all "${DOCKER_REPO}:${IMAGE_TAG}"
  buildah manifest inspect "${MANIFEST_NAME}"

  # import pre-built images
  buildah pull docker-archive:democratic-csi-windows-ltsc2019.tar
  buildah pull docker-archive:democratic-csi-windows-ltsc2022.tar

  # add pre-built images to manifest
  buildah manifest add "${MANIFEST_NAME}" democratic-csi-windows:${GITHUB_RUN_ID}-ltsc2019
  buildah manifest add "${MANIFEST_NAME}" democratic-csi-windows:${GITHUB_RUN_ID}-ltsc2022
  buildah manifest inspect "${MANIFEST_NAME}"

  # push manifest
  buildah manifest push --all "${MANIFEST_NAME}" docker://${DOCKER_REPO}:${IMAGE_TAG}
  buildah manifest push --all "${MANIFEST_NAME}" docker://${GHCR_REPO}:${IMAGE_TAG}

  # cleanup
  buildah manifest rm "${MANIFEST_NAME}" || true
else
  :
fi
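
For local debugging the same flow can be run outside CI; a rough sketch (the variable values below are placeholders, and it assumes buildah is installed, the linux manifest for ${IMAGE_TAG} has already been pushed, and the two windows image tars are in the working directory):

```
# hypothetical local invocation; in CI these values come from workflow secrets/outputs
export DOCKER_USERNAME=someuser DOCKER_PASSWORD=somepass
export GHCR_USERNAME=someuser GHCR_PASSWORD=sometoken
export GITHUB_RUN_ID=12345 IMAGE_TAG=next
bash .github/bin/docker-release-windows.sh
```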

@ -11,20 +11,12 @@ export GHCR_ORG="democratic-csi"
export GHCR_PROJECT="democratic-csi"
export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}"

if [[ $GITHUB_REF == refs/tags/* ]]; then
  export GIT_TAG=${GITHUB_REF#refs/tags/}
else
  export GIT_BRANCH=${GITHUB_REF#refs/heads/}
fi

if [[ -n "${GIT_TAG}" ]]; then
  docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_TAG} -t ${GHCR_REPO}:${GIT_TAG} .
elif [[ -n "${GIT_BRANCH}" ]]; then
  if [[ "${GIT_BRANCH}" == "master" ]]; then
    docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:latest -t ${GHCR_REPO}:latest .
  else
    docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${GIT_BRANCH} -t ${GHCR_REPO}:${GIT_BRANCH} .
  fi
if [[ -n "${IMAGE_TAG}" ]]; then
  # -t ${GHCR_REPO}:${IMAGE_TAG}
  docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \
    --label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
    --label "org.opencontainers.image.revision=${GITHUB_SHA}" \
    .
else
  :
fi

@ -17,12 +17,13 @@ jobs:
|
|||
with:
|
||||
access_token: ${{ github.token }}
|
||||
|
||||
build-npm:
|
||||
name: build-npm
|
||||
runs-on:
|
||||
- self-hosted
|
||||
build-npm-linux-amd64:
|
||||
runs-on: ubuntu-20.04
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 16
|
||||
- shell: bash
|
||||
name: npm install
|
||||
run: |
|
||||
|
|
@ -30,35 +31,84 @@ jobs:
|
|||
- name: upload build
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
#path: node_modules/
|
||||
path: node_modules.tar.gz
|
||||
retention-days: 7
|
||||
name: node-modules-linux-amd64
|
||||
path: node_modules-linux-amd64.tar.gz
|
||||
retention-days: 1
|
||||
|
||||
csi-sanity-synology:
|
||||
build-npm-windows-amd64:
|
||||
runs-on: windows-2022
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/setup-node@v3
|
||||
with:
|
||||
node-version: 16
|
||||
- shell: pwsh
|
||||
name: npm install
|
||||
run: |
|
||||
ci\bin\build.ps1
|
||||
- name: upload build
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: node-modules-windows-amd64
|
||||
path: node_modules-windows-amd64.tar.gz
|
||||
retention-days: 1
|
||||
|
||||
csi-sanity-synology-dsm6:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- synlogy/iscsi.yaml
|
||||
- synlogy/dsm6/iscsi.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Linux
|
||||
- X64
|
||||
- csi-sanity-synology
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
SYNOLOGY_HOST: ${{ secrets.SANITY_SYNOLOGY_HOST }}
|
||||
SYNOLOGY_PORT: ${{ secrets.SANITY_SYNOLOGY_PORT }}
|
||||
SYNOLOGY_HOST: ${{ secrets.SANITY_SYNOLOGY_DSM6_HOST }}
|
||||
SYNOLOGY_PORT: ${{ secrets.SANITY_SYNOLOGY_DSM6_PORT }}
|
||||
SYNOLOGY_USERNAME: ${{ secrets.SANITY_SYNOLOGY_USERNAME }}
|
||||
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
|
||||
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
|
||||
|
||||
csi-sanity-synology-dsm7:
|
||||
needs:
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- synlogy/dsm7/iscsi.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Linux
|
||||
- X64
|
||||
- csi-sanity-synology
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
SYNOLOGY_HOST: ${{ secrets.SANITY_SYNOLOGY_DSM7_HOST }}
|
||||
SYNOLOGY_PORT: ${{ secrets.SANITY_SYNOLOGY_DSM7_PORT }}
|
||||
SYNOLOGY_USERNAME: ${{ secrets.SANITY_SYNOLOGY_USERNAME }}
|
||||
SYNOLOGY_PASSWORD: ${{ secrets.SANITY_SYNOLOGY_PASSWORD }}
|
||||
SYNOLOGY_VOLUME: ${{ secrets.SANITY_SYNOLOGY_VOLUME }}
|
||||
|
|
@ -66,7 +116,7 @@ jobs:
|
|||
# api-based drivers
|
||||
csi-sanity-truenas-scale-22_02:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
|
@ -74,16 +124,18 @@ jobs:
|
|||
- truenas/scale/22.02/scale-iscsi.yaml
|
||||
- truenas/scale/22.02/scale-nfs.yaml
|
||||
# 80 char limit
|
||||
#- truenas/scale-smb.yaml
|
||||
- truenas/scale/22.02/scale-smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-zfs-local
|
||||
#- csi-sanity-truenas-scale
|
||||
- Linux
|
||||
- X64
|
||||
- csi-sanity-truenas
|
||||
#- csi-sanity-zfs-generic
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -97,25 +149,27 @@ jobs:
|
|||
# ssh-based drivers
|
||||
csi-sanity-truenas-core-12_0:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
# 63 char limit
|
||||
#- truenas/core-iscsi.yaml
|
||||
- truenas/core/12.0/core-iscsi.yaml
|
||||
- truenas/core/12.0/core-nfs.yaml
|
||||
# 80 char limit
|
||||
#- truenas/core-smb.yaml
|
||||
- truenas/core/12.0/core-smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-zfs-local
|
||||
#- csi-sanity-truenas-core
|
||||
- Linux
|
||||
- X64
|
||||
#- csi-sanity-truenas
|
||||
- csi-sanity-zfs-generic
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -129,7 +183,7 @@ jobs:
|
|||
# ssh-based drivers
|
||||
csi-sanity-truenas-core-13_0:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
|
@ -137,16 +191,18 @@ jobs:
|
|||
- truenas/core/13.0/core-iscsi.yaml
|
||||
- truenas/core/13.0/core-nfs.yaml
|
||||
# 80 char limit
|
||||
#- truenas/core-smb.yaml
|
||||
- truenas/core/13.0/core-smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- csi-sanity-zfs-local
|
||||
#- csi-sanity-truenas-core
|
||||
- Linux
|
||||
- X64
|
||||
#- csi-sanity-truenas
|
||||
- csi-sanity-zfs-generic
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -160,21 +216,24 @@ jobs:
|
|||
# ssh-based drivers
|
||||
csi-sanity-zfs-generic:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- zfs-generic/iscsi.yaml
|
||||
- zfs-generic/nfs.yaml
|
||||
- zfs-generic/smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Linux
|
||||
- X64
|
||||
- csi-sanity-zfs-generic
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -185,10 +244,70 @@ jobs:
|
|||
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
|
||||
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
|
||||
|
||||
# client drivers
|
||||
csi-sanity-client:
|
||||
needs:
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- client/nfs.yaml
|
||||
- client/smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Linux
|
||||
- X64
|
||||
- csi-sanity-client
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
|
||||
SHARE_NAME: tank_client_smb
|
||||
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
|
||||
|
||||
csi-sanity-client-windows:
|
||||
needs:
|
||||
- build-npm-windows-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- client\smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Windows
|
||||
- X64
|
||||
- csi-sanity-client
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules-windows-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci\bin\run.ps1
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: ".\\ci\\configs\\${{ matrix.config }}"
|
||||
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
|
||||
SHARE_NAME: tank_client_smb
|
||||
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
|
||||
|
||||
|
||||
|
||||
# zfs-local drivers
|
||||
csi-sanity-zfs-local:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
|
|
@ -197,12 +316,14 @@ jobs:
|
|||
- zfs-local/dataset.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Linux
|
||||
- X64
|
||||
- csi-sanity-zfs-local
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: node-modules-linux-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
|
|
@ -213,36 +334,108 @@ jobs:
|
|||
# local-hostpath driver
|
||||
csi-sanity-local-hostpath:
|
||||
needs:
|
||||
- build-npm
|
||||
- build-npm-linux-amd64
|
||||
- build-npm-windows-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- local-hostpath/basic.yaml
|
||||
os: [Linux, Windows]
|
||||
include:
|
||||
- os: Linux
|
||||
npmartifact: node-modules-linux-amd64
|
||||
template: "./ci/configs/local-hostpath/basic.yaml"
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
- os: Windows
|
||||
npmartifact: node-modules-windows-amd64
|
||||
template: ".\\ci\\configs\\local-hostpath\\basic.yaml"
|
||||
run: |
|
||||
# run tests
|
||||
ci\bin\run.ps1
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- ${{ matrix.os }}
|
||||
- X64
|
||||
- csi-sanity-local-hostpath
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules
|
||||
name: ${{ matrix.npmartifact }}
|
||||
- name: csi-sanity
|
||||
run: ${{ matrix.run }}
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "${{ matrix.template }}"
|
||||
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
|
||||
|
||||
csi-sanity-windows-node:
|
||||
needs:
|
||||
- build-npm-windows-amd64
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- windows\iscsi.yaml
|
||||
- windows\smb.yaml
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- Windows
|
||||
- X64
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: node-modules-windows-amd64
|
||||
- name: csi-sanity
|
||||
run: |
|
||||
# run tests
|
||||
ci/bin/run.sh
|
||||
ci\bin\run.ps1
|
||||
env:
|
||||
TEMPLATE_CONFIG_FILE: "./ci/configs/${{ matrix.config }}"
|
||||
CSI_SANITY_SKIP: "should fail when requesting to create a snapshot with already existing name and different source volume ID|should fail when requesting to create a volume with already existing name and different capacity"
|
||||
TEMPLATE_CONFIG_FILE: ".\\ci\\configs\\${{ matrix.config }}"
|
||||
SERVER_HOST: ${{ secrets.SANITY_ZFS_GENERIC_HOST }}
|
||||
SERVER_USERNAME: ${{ secrets.SANITY_ZFS_GENERIC_USERNAME }}
|
||||
SERVER_PASSWORD: ${{ secrets.SANITY_ZFS_GENERIC_PASSWORD }}
|
||||
CSI_SANITY_FOCUS: "Node Service"
|
||||
|
||||
build-docker:
|
||||
determine-image-tag:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
tag: ${{ steps.tag.outputs.tag }}
|
||||
steps:
|
||||
- id: tag
|
||||
run: |
|
||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
||||
export GIT_TAG=${GITHUB_REF#refs/tags/}
|
||||
else
|
||||
export GIT_BRANCH=${GITHUB_REF#refs/heads/}
|
||||
fi
|
||||
if [[ -n "${GIT_TAG}" ]]; then
|
||||
echo "::set-output name=tag::${GIT_TAG}"
|
||||
elif [[ -n "${GIT_BRANCH}" ]]; then
|
||||
if [[ "${GIT_BRANCH}" == "master" ]]; then
|
||||
echo "::set-output name=tag::latest"
|
||||
else
|
||||
echo "::set-output name=tag::${GIT_BRANCH}"
|
||||
fi
|
||||
else
|
||||
:
|
||||
fi
|
||||
|
||||
build-docker-linux:
|
||||
needs:
|
||||
- csi-sanity-synology
|
||||
- determine-image-tag
|
||||
- csi-sanity-synology-dsm6
|
||||
- csi-sanity-synology-dsm7
|
||||
- csi-sanity-truenas-scale-22_02
|
||||
- csi-sanity-truenas-core-12_0
|
||||
- csi-sanity-truenas-core-13_0
|
||||
- csi-sanity-zfs-generic
|
||||
- csi-sanity-client
|
||||
- csi-sanity-client-windows
|
||||
- csi-sanity-zfs-local
|
||||
- csi-sanity-local-hostpath
|
||||
- csi-sanity-windows-node
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
|
@ -250,7 +443,7 @@ jobs:
|
|||
run: |
|
||||
export ARCH=$([ $(uname -m) = "x86_64" ] && echo "amd64" || echo "arm64")
|
||||
mkdir -p ~/.docker/cli-plugins/
|
||||
wget -qO ~/.docker/cli-plugins/docker-buildx https://github.com/docker/buildx/releases/download/v0.5.1/buildx-v0.5.1.linux-${ARCH}
|
||||
wget -qO ~/.docker/cli-plugins/docker-buildx https://github.com/docker/buildx/releases/download/v0.8.2/buildx-v0.8.2.linux-${ARCH}
|
||||
chmod a+x ~/.docker/cli-plugins/docker-buildx
|
||||
docker info
|
||||
docker buildx version
|
||||
|
|
@ -267,3 +460,78 @@ jobs:
|
|||
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
|
||||
DOCKER_CLI_EXPERIMENTAL: enabled
|
||||
DOCKER_BUILD_PLATFORM: linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le
|
||||
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}
|
||||
|
||||
build-docker-windows:
|
||||
needs:
|
||||
- csi-sanity-synology-dsm6
|
||||
- csi-sanity-synology-dsm7
|
||||
- csi-sanity-truenas-scale-22_02
|
||||
- csi-sanity-truenas-core-12_0
|
||||
- csi-sanity-truenas-core-13_0
|
||||
- csi-sanity-zfs-generic
|
||||
- csi-sanity-client
|
||||
- csi-sanity-client-windows
|
||||
- csi-sanity-zfs-local
|
||||
- csi-sanity-local-hostpath
|
||||
- csi-sanity-windows-node
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [windows-2019, windows-2022]
|
||||
include:
|
||||
- os: windows-2019
|
||||
core_base_tag: ltsc2019
|
||||
nano_base_tag: "1809"
|
||||
file: Dockerfile.Windows
|
||||
- os: windows-2022
|
||||
core_base_tag: ltsc2022
|
||||
nano_base_tag: ltsc2022
|
||||
file: Dockerfile.Windows
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: docker build
|
||||
shell: bash
|
||||
run: |
|
||||
docker info
|
||||
docker build --pull -f ${{ matrix.file }} --build-arg NANO_BASE_TAG=${{ matrix.nano_base_tag }} --build-arg CORE_BASE_TAG=${{ matrix.core_base_tag }} -t democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} \
|
||||
--label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
|
||||
--label "org.opencontainers.image.revision=${GITHUB_SHA}" \
|
||||
.
|
||||
docker inspect democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }}
|
||||
docker save democratic-csi-windows:${GITHUB_RUN_ID}-${{ matrix.core_base_tag }} -o democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
||||
- name: upload image tar
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
||||
path: democratic-csi-windows-${{ matrix.core_base_tag }}.tar
|
||||
retention-days: 1
|
||||
|
||||
push-docker-windows:
|
||||
needs:
|
||||
- build-docker-linux
|
||||
- build-docker-windows
|
||||
- determine-image-tag
|
||||
runs-on:
|
||||
- self-hosted
|
||||
- buildah
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: democratic-csi-windows-ltsc2019.tar
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: democratic-csi-windows-ltsc2022.tar
|
||||
- name: push windows images with buildah
|
||||
run: |
|
||||
#.github/bin/install_latest_buildah.sh
|
||||
buildah version
|
||||
.github/bin/docker-release-windows.sh
|
||||
env:
|
||||
DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
|
||||
DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
|
||||
GHCR_USERNAME: ${{ secrets.GHCR_USERNAME }}
|
||||
GHCR_PASSWORD: ${{ secrets.GHCR_PASSWORD }}
|
||||
DOCKER_CLI_EXPERIMENTAL: enabled
|
||||
IMAGE_TAG: ${{needs.determine-image-tag.outputs.tag}}

@ -1,2 +1,4 @@
**~
node_modules
dev
/ci/bin/*dev*

CHANGELOG.md
@ -1,3 +1,33 @@
# v1.7.0

Released 2022-06-08

The windows release.

- windows smb, iscsi, and local-hostpath support (requires chart `v0.13.0+`)
- ntfs, exfat, vfat fs support
- `zfs-generic-smb` driver
- synology improvements
  - DSM7 support
  - synology enhancements to allow templates to be configured at various
    'levels'
- testing improvements
  - support (for testing) generating volume_id from name
  - test all the smb variants
  - test all nfs/smb client drivers
- misc fixes
  - wait for chown/chmod jobs to complete (freenas)
  - general improvement to smb behavior throughout
  - better logging
  - better sudo logic throughout
  - minor fixes throughout
  - more robust logic for connecting to iscsi devices with partition tables
- massive performance improvement for ssh-based drivers (reusing existing
  connection instead of new connection per-command)
- dep bumps
- trimmed container images
- windows container images for 2019 and 2022

# v1.6.3

Released 2022-04-08

@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y locales && rm -rf /var/lib/apt/lists/*
  && localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8

ENV LANG=en_US.utf8
ENV NODE_VERSION=v16.14.2
ENV NODE_VERSION=v16.15.1
ENV NODE_ENV=production

# install build deps

@ -43,6 +43,8 @@ RUN rm -rf docker
FROM debian:11-slim

LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.licenses MIT

ENV DEBIAN_FRONTEND=noninteractive

@ -73,7 +75,7 @@ COPY --from=build /usr/local/lib/nodejs/bin/node /usr/local/bin/node
# netbase is required by rpcbind/rpcinfo to work properly
# /etc/{services,rpc} are required
RUN apt-get update && \
  apt-get install -y netbase socat e2fsprogs xfsprogs btrfs-progs fatresize dosfstools nfs-common cifs-utils sudo rsync && \
  apt-get install -y netbase socat e2fsprogs exfatprogs xfsprogs btrfs-progs fatresize dosfstools ntfs-3g nfs-common cifs-utils fdisk gdisk cloud-guest-utils sudo rsync && \
  rm -rf /var/lib/apt/lists/*

# controller requirements

@ -0,0 +1,100 @@
#
# https://github.com/kubernetes/kubernetes/blob/master/test/images/windows/powershell-helper/Dockerfile_windows
# https://github.com/kubernetes/kubernetes/blob/master/test/images/busybox/Dockerfile_windows
# https://github.com/kubernetes/kubernetes/tree/master/test/images#windows-test-images-considerations
# https://stefanscherer.github.io/find-dependencies-in-windows-containers/
#
# docker build --build-arg NANO_BASE_TAG=1809 --build-arg CORE_BASE_TAG=ltsc2019 -t foobar -f Dockerfile.Windows .
# docker run --rm -ti --entrypoint powershell foobar
# docker run --rm foobar
# docker save foobar -o foobar.tar
# buildah pull docker-archive:foobar.tar

# mcr.microsoft.com/windows/servercore:ltsc2019
# mcr.microsoft.com/windows/nanoserver:1809

ARG NANO_BASE_TAG
ARG CORE_BASE_TAG

FROM mcr.microsoft.com/windows/servercore:${CORE_BASE_TAG} as powershell

# install powershell
ENV PS_VERSION=6.2.7
ADD https://github.com/PowerShell/PowerShell/releases/download/v$PS_VERSION/PowerShell-$PS_VERSION-win-x64.zip /PowerShell/powershell.zip

RUN cd C:\PowerShell &\
    tar.exe -xf powershell.zip &\
    del powershell.zip &\
    mklink powershell.exe pwsh.exe


FROM mcr.microsoft.com/windows/servercore:${CORE_BASE_TAG} as build

SHELL ["powershell", "-Command", "$ErrorActionPreference = 'Stop'; $ProgressPreference = 'SilentlyContinue';"]

#ENV GPG_VERSION 4.0.2
ENV GPG_VERSION 2.3.4

RUN Invoke-WebRequest $('https://files.gpg4win.org/gpg4win-vanilla-{0}.exe' -f $env:GPG_VERSION) -OutFile 'gpg4win.exe' -UseBasicParsing ; \
    Start-Process .\gpg4win.exe -ArgumentList '/S' -NoNewWindow -Wait

# https://github.com/nodejs/node#release-keys
RUN @( \
    '4ED778F539E3634C779C87C6D7062848A1AB005C', \
    '141F07595B7B3FFE74309A937405533BE57C7D57', \
    '94AE36675C464D64BAFA68DD7434390BDBE9B9C5', \
    '74F12602B6F1C4E913FAA37AD3A89613643B6201', \
    '71DCFD284A79C3B38668286BC97EC7A07EDE3FC1', \
    '61FC681DFB92A079F1685E77973F295594EC4689', \
    '8FCCA13FEF1D0C2E91008E09770F7A9A5AE15600', \
    'C4F0DFFF4E8C1A8236409D08E73BC641CC11F4C8', \
    'C82FA3AE1CBEDC6BE46B9360C43CEC45C17AB93C', \
    'DD8F2338BAE7501E3DD5AC78C273792F7D83545D', \
    'A48C2BEE680E841632CD4E44F07496B3EB3C1762', \
    '108F52B48DB57BB0CC439B2997B01419BD92F80A', \
    'B9E2F5981AA6E0CD28160D9FF13993A75599653C' \
  ) | foreach { \
    gpg --keyserver hkps://keys.openpgp.org --recv-keys $_ ; \
  }

ENV NODE_VERSION 16.15.1

RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/SHASUMS256.txt.asc' -f $env:NODE_VERSION) -OutFile 'SHASUMS256.txt.asc' -UseBasicParsing ;
#RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/SHASUMS256.txt.asc' -f $env:NODE_VERSION) -OutFile 'SHASUMS256.txt.asc' -UseBasicParsing ; \
# gpg --batch --decrypt --output SHASUMS256.txt SHASUMS256.txt.asc
#gpg --verify SHASUMS256.txt.sig SHASUMS256.txt

RUN Invoke-WebRequest $('https://nodejs.org/dist/v{0}/node-v{0}-win-x64.zip' -f $env:NODE_VERSION) -OutFile 'node.zip' -UseBasicParsing ; \
    $sum = $(cat SHASUMS256.txt.asc | sls $(' node-v{0}-win-x64.zip' -f $env:NODE_VERSION)) -Split ' ' ; \
    if ((Get-FileHash node.zip -Algorithm sha256).Hash -ne $sum[0]) { Write-Error 'SHA256 mismatch' } ; \
    Expand-Archive node.zip -DestinationPath C:\ ; \
    Rename-Item -Path $('C:\node-v{0}-win-x64' -f $env:NODE_VERSION) -NewName 'C:\nodejs'

#RUN setx /M PATH "%PATH%;C:\nodejs"
RUN setx /M PATH $(${Env:PATH} + \";C:\nodejs\")

RUN node --version; npm --version;

RUN mkdir /app
WORKDIR /app

COPY package*.json ./
RUN npm install --only=production; ls /
COPY . .

FROM mcr.microsoft.com/windows/nanoserver:${NANO_BASE_TAG}

LABEL org.opencontainers.image.source https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.url https://github.com/democratic-csi/democratic-csi
LABEL org.opencontainers.image.licenses MIT

# if additional dlls are required can copy like this
#COPY --from=build /Windows/System32/nltest.exe /Windows/System32/nltest.exe

COPY --from=build /app /app
WORKDIR /app

# this works for both host-process and non-host-process container semantics
COPY --from=build /nodejs/node.exe ./bin

ENTRYPOINT [ "bin/node.exe", "--expose-gc", "bin/democratic-csi" ]

README.md
@ -64,6 +64,16 @@ Predominantly 3 things are needed:

You should install/configure the requirements for both nfs and iscsi.

### cifs

```
RHEL / CentOS
sudo yum install -y cifs-utils

Ubuntu / Debian
sudo apt-get install -y cifs-utils
```

### nfs

```

@ -176,6 +186,35 @@ volume is/was provisioned.
The nature of this `driver` also prevents the enforcement of quotas. In short
the requested volume size is generally ignored.

### windows

Support for Windows was introduced in `v1.7.0`. Currently support is limited
to kubernetes nodes capable of running `HostProcess` containers. Support was
tested against `Windows Server 2019` using `rke2-v1.24`. Currently any of the
`-smb` and `-iscsi` drivers will work. Support for `ntfs` was added to the
linux nodes as well (using the `ntfs3` driver) so volumes created can be
utilized by nodes with either operating system (in the case of `cifs` by both
simultaneously).

Due to current limits in the kubernetes tooling it is not possible to use the
`local-hostpath` driver but support is implemented in this project and will
work as soon as kubernetes support is available.

```
# ensure all updates are installed

# enable the container feature
Enable-WindowsOptionalFeature -Online -FeatureName Containers -All

# create symbolic link due to current limitations in the driver-registrar container
New-Item -ItemType SymbolicLink -Path "C:\registration\" -Target "C:\var\lib\kubelet\plugins_registry\"

# install a HostProcess compatible kubernetes
```

- https://kubernetes.io/blog/2021/08/16/windows-hostprocess-containers/
- https://kubernetes.io/docs/tasks/configure-pod-container/create-hostprocess-pod/

## Server Prep

Server preparation depends slightly on which `driver` you are using.

@ -201,6 +240,7 @@ Ensure the following services are configured and running:
- ensure `zsh`, `bash`, or `sh` is set as the root shell, `csh` gives false errors due to quoting
- nfs
- iscsi

- (fixed in 12.0-U2+) when using the FreeNAS API concurrently the
`/etc/ctl.conf` file on the server can become invalid, some sample scripts
are provided in the `contrib` directory to clean things up ie: copy the

@ -216,7 +256,7 @@ Ensure the following services are configured and running:
- `curl --header "Accept: application/json" --user root:<password> 'http(s)://<ip>/api/v2.0/iscsi/initiator'`
- `curl --header "Accept: application/json" --user root:<password> 'http(s)://<ip>/api/v2.0/iscsi/auth'`
- The maximum number of volumes is limited to 255 by default on FreeBSD (physical devices such as disks and CD-ROM drives count against this value).
Be sure to properly adjust both [tunables](https://www.freebsd.org/cgi/man.cgi?query=ctl&sektion=4#end) `kern.cam.ctl.max_ports` and `kern.cam.ctl.max_luns` to avoid running out of resources when dynamically provisioning iSCSI volumes on FreeNAS or TrueNAS Core.
Be sure to properly adjust both [tunables](https://www.freebsd.org/cgi/man.cgi?query=ctl&sektion=4#end) `kern.cam.ctl.max_ports` and `kern.cam.ctl.max_luns` to avoid running out of resources when dynamically provisioning iSCSI volumes on FreeNAS or TrueNAS Core.

- smb

@ -273,17 +313,38 @@ Issues to review:
- https://jira.ixsystems.com/browse/NAS-108522
- https://jira.ixsystems.com/browse/NAS-107219

### ZoL (zfs-generic-nfs, zfs-generic-iscsi)
### ZoL (zfs-generic-nfs, zfs-generic-iscsi, zfs-generic-smb)

Ensure ssh and zfs are installed on the nfs/iscsi server and that you have installed
`targetcli`.

- `sudo yum install targetcli -y`
- `sudo apt-get -y install targetcli-fb`
The driver executes many commands over an ssh connection. You may consider
disabling all the `motd` details for the ssh user as it can spike the cpu
unnecessarily:

- https://askubuntu.com/questions/318592/how-can-i-remove-the-landscape-canonical-com-greeting-from-motd
- https://linuxconfig.org/disable-dynamic-motd-and-news-on-ubuntu-20-04-focal-fossa-linux

```
####### iscsi
yum install targetcli -y
apt-get -y install targetcli-fb

####### smb
apt-get install -y samba smbclient

# create posix user
groupadd -g 1001 smbroot
useradd -u 1001 -g 1001 -M -N -s /sbin/nologin smbroot
passwd smbroot (optional)

# create smb user and set password
smbpasswd -L -a smbroot
```
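
To sanity check the samba side before pointing the driver at it, you can list shares as the newly created user; the host name below is a placeholder:

```
# verify the smbroot credentials work against the server (replace "server" with your host)
smbclient -L //server -U smbroot
```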

### Synology (synology-iscsi)

Ensure iscsi manager has been installed and is generally setup/configured.
Ensure iscsi manager has been installed and is generally setup/configured. DSM 6.3+ is supported.

## Helm Installation

@ -1,10 +1,18 @@
|
|||
#!/usr/bin/env -S node --expose-gc ${NODE_OPTIONS_CSI_1} ${NODE_OPTIONS_CSI_2} ${NODE_OPTIONS_CSI_3} ${NODE_OPTIONS_CSI_4} ${NODE_OPTIONS_CSI_5}
|
||||
#!/usr/bin/env -S node --expose-gc ${NODE_OPTIONS_CSI_1} ${NODE_OPTIONS_CSI_2} ${NODE_OPTIONS_CSI_3} ${NODE_OPTIONS_CSI_4}
|
||||
|
||||
/**
|
||||
* keep the shebang line length under 128
|
||||
* https://github.com/democratic-csi/democratic-csi/issues/171
|
||||
*/
|
||||
|
||||
// polyfills
|
||||
require("../src/utils/polyfills");
|
||||
const yaml = require("js-yaml");
|
||||
const fs = require("fs");
|
||||
const { grpc } = require("../src/utils/grpc");
|
||||
const { stringify } = require("../src/utils/general");
|
||||
const { stringify, stripWindowsDriveLetter } = require("../src/utils/general");
|
||||
|
||||
let driverConfigFile;
|
||||
let options;
|
||||
const args = require("yargs")
|
||||
.env("DEMOCRATIC_CSI")
|
||||
|
|
@ -14,17 +22,29 @@ const args = require("yargs")
|
|||
describe: "provide a path to driver config file",
|
||||
config: true,
|
||||
configParser: (path) => {
|
||||
try {
|
||||
options = JSON.parse(fs.readFileSync(path, "utf-8"));
|
||||
return true;
|
||||
} catch (e) {}
|
||||
// normalize path for host-process containers
|
||||
// CONTAINER_SANDBOX_MOUNT_POINT C:\C\0eac9a8da76f6d7119c5d9f86c8b3106d67dbbf01dbeb22fdc0192476b7e31cb\
|
||||
// path is injected as C:\config\driver-config-file.yaml
|
||||
if (process.env.CONTAINER_SANDBOX_MOUNT_POINT) {
|
||||
path = `${
|
||||
process.env.CONTAINER_SANDBOX_MOUNT_POINT
|
||||
}${stripWindowsDriveLetter(path)}`;
|
||||
}
|
||||
|
||||
try {
|
||||
options = yaml.load(fs.readFileSync(path, "utf8"));
|
||||
return true;
|
||||
} catch (e) {}
|
||||
try {
|
||||
driverConfigFile = fs.realpathSync(path);
|
||||
} catch (e) {
|
||||
console.log("failed finding config file realpath: " + e.toString());
|
||||
driverConfigFile = path;
|
||||
}
|
||||
|
||||
throw new Error("failed parsing config file: " + path);
|
||||
return true;
|
||||
} catch (e) {
|
||||
console.log("failed parsing config file: " + path);
|
||||
throw e;
|
||||
}
|
||||
},
|
||||
})
|
||||
.demandOption(["driver-config-file"], "driver-config-file is required")
|
||||
|
|
@ -136,6 +156,14 @@ let operationLock = new Set();
|
|||
|
||||
async function requestHandlerProxy(call, callback, serviceMethodName) {
|
||||
const cleansedCall = JSON.parse(stringify(call));
|
||||
|
||||
delete cleansedCall.call;
|
||||
delete cleansedCall.canceled;
|
||||
for (const key in cleansedCall) {
|
||||
if (key.startsWith("_")) {
|
||||
delete cleansedCall[key];
|
||||
}
|
||||
}
|
||||
for (const key in cleansedCall.request) {
|
||||
if (key.includes("secret")) {
|
||||
cleansedCall.request[key] = "redacted";
|
||||
|
|
@ -165,6 +193,18 @@ async function requestHandlerProxy(call, callback, serviceMethodName) {
|
|||
});
|
||||
}
|
||||
|
||||
// for testing purposes
|
||||
//await GeneralUtils.sleep(10000);
|
||||
//throw new Error("fake error");
|
||||
|
||||
// for CI/testing purposes
|
||||
if (["NodePublishVolume", "NodeStageVolume"].includes(serviceMethodName)) {
|
||||
await driver.setVolumeContextCache(
|
||||
call.request.volume_id,
|
||||
call.request.volume_context
|
||||
);
|
||||
}
|
||||
|
||||
let response;
|
||||
let responseError;
|
||||
try {
|
||||
|
|
@ -190,12 +230,21 @@ async function requestHandlerProxy(call, callback, serviceMethodName) {
|
|||
throw responseError;
|
||||
}
|
||||
|
||||
// for CI/testing purposes
|
||||
if (serviceMethodName == "CreateVolume") {
|
||||
await driver.setVolumeContextCache(
|
||||
response.volume.volume_id,
|
||||
response.volume.volume_context
|
||||
);
|
||||
}
|
||||
|
||||
logger.info(
|
||||
"new response - driver: %s method: %s response: %j",
|
||||
driver.constructor.name,
|
||||
serviceMethodName,
|
||||
response
|
||||
);
|
||||
|
||||
callback(null, response);
|
||||
} catch (e) {
|
||||
let message;
|
||||
|
|
@ -205,7 +254,7 @@ async function requestHandlerProxy(call, callback, serviceMethodName) {
|
|||
message += ` ${e.stack}`;
|
||||
}
|
||||
} else {
|
||||
message = JSON.stringify(e);
|
||||
message = stringify(e);
|
||||
}
|
||||
|
||||
logger.error(
|
||||
|
|
@ -336,9 +385,11 @@ if (args.serverSocket) {
|
|||
}
|
||||
|
||||
logger.info(
|
||||
"starting csi server - name: %s, version: %s, driver: %s, mode: %s, csi version: %s, address: %s, socket: %s",
|
||||
args.csiName,
|
||||
"starting csi server - node version: %s, package version: %s, config file: %s, csi-name: %s, csi-driver: %s, csi-mode: %s, csi-version: %s, address: %s, socket: %s",
|
||||
process.version,
|
||||
args.version,
|
||||
driverConfigFile,
|
||||
args.csiName,
|
||||
options.driver,
|
||||
args.csiMode.join(","),
|
||||
args.csiVersion,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,19 @@
Write-Output "current user"
whoami
Write-Output "current working directory"
(Get-Location).Path
Write-Output "current PATH"
$Env:PATH

Write-Output "node version"
node --version
Write-Output "npm version"
npm --version

# install deps
Write-Output "running npm i"
npm i

Write-Output "creating tar.gz"
# tar node_modules to keep the number of files low to upload
tar -zcf node_modules-windows-amd64.tar.gz node_modules

@ -12,4 +12,4 @@ npm --version
npm i

# tar node_modules to keep the number of files low to upload
tar -zcf node_modules.tar.gz node_modules
tar -zcf node_modules-linux-amd64.tar.gz node_modules

@ -0,0 +1,16 @@
#Set-StrictMode -Version Latest
#$ErrorActionPreference = "Stop"
#$PSDefaultParameterValues['*:ErrorAction'] = "Stop"
function ThrowOnNativeFailure {
  if (-not $?) {
    throw 'Native Failure'
  }
}

function psenvsubstr($data) {
  foreach($v in Get-ChildItem env:) {
    $key = '${' + $v.Name + '}'
    $data = $data.Replace($key, $v.Value)
  }
  return $data
}

@ -0,0 +1,15 @@
if (! $PSScriptRoot) {
  $PSScriptRoot = $args[0]
}

. "${PSScriptRoot}\helper.ps1"

Set-Location $env:PWD

Write-Output "launching csi-grpc-proxy"

$env:PROXY_TO = "npipe://" + $env:NPIPE_ENDPOINT
$env:BIND_TO = "unix://" + $env:CSI_ENDPOINT

# https://stackoverflow.com/questions/2095088/error-when-calling-3rd-party-executable-from-powershell-when-using-an-ide
csi-grpc-proxy.exe 2>&1 | % { "$_" }

@ -0,0 +1,69 @@
|
|||
if (! $PSScriptRoot) {
|
||||
$PSScriptRoot = $args[0]
|
||||
}
|
||||
|
||||
. "${PSScriptRoot}\helper.ps1"
|
||||
|
||||
Set-Location $env:PWD
|
||||
|
||||
$exit_code = 0
|
||||
$tmpdir = New-Item -ItemType Directory -Path ([System.IO.Path]::GetTempPath()) -Name ([System.IO.Path]::GetRandomFileName())
|
||||
$env:CSI_SANITY_TEMP_DIR = $tmpdir.FullName
|
||||
|
||||
# cleanse endpoint to something csi-sanity plays nicely with
|
||||
$endpoint = ${env:CSI_ENDPOINT}
|
||||
$endpoint = $endpoint.replace("C:\", "/")
|
||||
$endpoint = $endpoint.replace("\", "/")
|
||||
|
||||
if (! $env:CSI_SANITY_FAILFAST) {
|
||||
$env:CSI_SANITY_FAILFAST = "false"
|
||||
}
|
||||
|
||||
$failfast = ""
|
||||
|
||||
if ($env:CSI_SANITY_FAILFAST -eq "true") {
|
||||
$failfast = "-ginkgo.failFast"
|
||||
}
|
||||
|
||||
Write-Output "launching csi-sanity"
|
||||
Write-Output "connecting to: ${endpoint}"
|
||||
Write-Output "failfast: ${env:CSI_SANITY_FAILFAST}"
|
||||
Write-Output "skip: ${env:CSI_SANITY_SKIP}"
|
||||
Write-Output "focus: ${env:CSI_SANITY_FOCUS}"
|
||||
|
||||
$skip = '"' + ${env:CSI_SANITY_SKIP} + '"'
|
||||
$focus = '"' + ${env:CSI_SANITY_FOCUS} + '"'
|
||||
|
||||
csi-sanity.exe -"csi.endpoint" "unix://${endpoint}" `
|
||||
$failfast `
|
||||
-"csi.mountdir" "${env:CSI_SANITY_TEMP_DIR}\mnt" `
|
||||
-"csi.stagingdir" "${env:CSI_SANITY_TEMP_DIR}\stage" `
|
||||
-"csi.testvolumeexpandsize" 2147483648 `
|
||||
-"csi.testvolumesize" 1073741824 `
|
||||
-"ginkgo.skip" $skip `
|
||||
-"ginkgo.focus" $focus
|
||||
|
||||
# does not work the same as linux for some reason
|
||||
# -"ginkgo.skip" "'" + ${env:CSI_SANITY_SKIP} + "'" `
|
||||
|
||||
if (-not $?) {
|
||||
$exit_code = $LASTEXITCODE
|
||||
Write-Output "csi-sanity exit code: ${exit_code}"
|
||||
if ($exit_code -gt 0) {
|
||||
$exit_code = 1
|
||||
}
|
||||
}
|
||||
|
||||
# remove tmp dir
|
||||
Remove-Item -Path "$env:CSI_SANITY_TEMP_DIR" -Force -Recurse
|
||||
|
||||
#Exit $exit_code
|
||||
Write-Output "exiting with exit code: ${exit_code}"
|
||||
|
||||
if ($exit_code -gt 0) {
|
||||
throw "csi-sanity failed"
|
||||
}
|
||||
|
||||
# these do not work for whatever reason
|
||||
#Exit $exit_code
|
||||
#[System.Environment]::Exit($exit_code)
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
if (! $PSScriptRoot) {
|
||||
$PSScriptRoot = $args[0]
|
||||
}
|
||||
|
||||
. "${PSScriptRoot}\helper.ps1"
|
||||
|
||||
Set-Location $env:PWD
|
||||
Write-Output "launching server"
|
||||
|
||||
$env:LOG_LEVEL = "debug"
|
||||
$env:CSI_VERSION = "1.5.0"
|
||||
$env:CSI_NAME = "driver-test"
|
||||
$env:CSI_SANITY = "1"
|
||||
|
||||
if (! ${env:CONFIG_FILE}) {
|
||||
$env:CONFIG_FILE = $env:TEMP + "\csi-config-" + $env:CI_BUILD_KEY + ".yaml"
|
||||
if ($env:TEMPLATE_CONFIG_FILE) {
|
||||
$config_data = Get-Content "${env:TEMPLATE_CONFIG_FILE}" -Raw
|
||||
$config_data = psenvsubstr($config_data)
|
||||
$config_data | Set-Content "${env:CONFIG_FILE}"
|
||||
}
|
||||
}
|
||||
|
||||
node "${PSScriptRoot}\..\..\bin\democratic-csi" `
|
||||
--log-level "$env:LOG_LEVEL" `
|
||||
--driver-config-file "$env:CONFIG_FILE" `
|
||||
--csi-version "$env:CSI_VERSION" `
|
||||
--csi-name "$env:CSI_NAME" `
|
||||
--server-socket "${env:NPIPE_ENDPOINT}" 2>&1 | % { "$_" }
|
||||
|
|
@ -3,6 +3,9 @@
|
|||
set -e
|
||||
set -x
|
||||
|
||||
export PATH="/usr/local/lib/nodejs/bin:${PATH}"
|
||||
echo "current launch-server PATH: ${PATH}"
|
||||
|
||||
: ${CI_BUILD_KEY:="local"}
|
||||
: ${TEMPLATE_CONFIG_FILE:=${1}}
|
||||
: ${CSI_MODE:=""}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,133 @@
|
|||
# https://stackoverflow.com/questions/2095088/error-when-calling-3rd-party-executable-from-powershell-when-using-an-ide
|
||||
#
|
||||
# Examples:
|
||||
#
|
||||
# $mypath = $MyInvocation.MyCommand.Path
|
||||
# Get-ChildItem env:\
|
||||
# Get-Job | Where-Object -Property State -eq “Running”
|
||||
# Get-Location (like pwd)
|
||||
# if ($null -eq $env:FOO) { $env:FOO = 'bar' }
|
||||
|
||||
. "${PSScriptRoot}\helper.ps1"
|
||||
|
||||
#Set-PSDebug -Trace 2
|
||||
|
||||
Write-Output "current user"
|
||||
whoami
|
||||
Write-Output "current working directory"
|
||||
(Get-Location).Path
|
||||
Write-Output "current PATH"
|
||||
$Env:PATH
|
||||
|
||||
function Job-Cleanup() {
|
||||
Get-Job | Stop-Job
|
||||
Get-Job | Remove-Job
|
||||
}
|
||||
|
||||
# start clean
|
||||
Job-Cleanup
|
||||
|
||||
# install from artifacts
|
||||
if ((Test-Path "node_modules-windows-amd64.tar.gz") -and !(Test-Path "node_modules")) {
|
||||
Write-Output "extracting node_modules-windows-amd64.tar.gz"
|
||||
tar -zxf node_modules-windows-amd64.tar.gz
|
||||
}
|
||||
|
||||
# setup env
|
||||
$env:PWD = (Get-Location).Path
|
||||
$env:CI_BUILD_KEY = ([guid]::NewGuid() -Split "-")[0]
|
||||
$env:CSI_ENDPOINT = $env:TEMP + "\csi-sanity-" + $env:CI_BUILD_KEY + ".sock"
|
||||
$env:NPIPE_ENDPOINT = "//./pipe/csi-sanity-" + $env:CI_BUILD_KEY + "csi.sock"
|
||||
|
||||
# testing values
|
||||
if (Test-Path "${PSScriptRoot}\run-dev.ps1") {
|
||||
. "${PSScriptRoot}\run-dev.ps1"
|
||||
}
|
||||
|
||||
# launch server
|
||||
$server_job = Start-Job -FilePath .\ci\bin\launch-server.ps1 -InitializationScript {} -ArgumentList $PSScriptRoot
|
||||
|
||||
# launch csi-grpc-proxy
|
||||
$csi_grpc_proxy_job = Start-Job -FilePath .\ci\bin\launch-csi-grpc-proxy.ps1 -InitializationScript {} -ArgumentList $PSScriptRoot
|
||||
|
||||
# wait for socket to appear
|
||||
$iter = 0
|
||||
$max_iter = 60
|
||||
$started = 1
|
||||
while (!(Test-Path "${env:CSI_ENDPOINT}")) {
|
||||
$iter++
|
||||
Write-Output "Waiting for ${env:CSI_ENDPOINT} to appear"
|
||||
Start-Sleep 1
|
||||
try {
|
||||
Get-Job | Receive-Job
|
||||
} catch {}
|
||||
if ($iter -gt $max_iter) {
|
||||
Write-Output "${env:CSI_ENDPOINT} failed to appear"
|
||||
$started = 0
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
# launch csi-sanity
|
||||
if ($started -eq 1) {
|
||||
$csi_sanity_job = Start-Job -FilePath .\ci\bin\launch-csi-sanity.ps1 -InitializationScript {} -ArgumentList $PSScriptRoot
|
||||
}
|
||||
|
||||
# https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/get-job?view=powershell-7.2
|
||||
# -ChildJobState
|
||||
$iter = 0
|
||||
while ($csi_sanity_job -and ($csi_sanity_job.State -eq "Running" -or $csi_sanity_job.State -eq "NotStarted")) {
|
||||
$iter++
|
||||
foreach ($job in Get-Job) {
|
||||
if (($job -eq $csi_grpc_proxy_job) -and ($iter -gt 20)) {
|
||||
continue
|
||||
}
|
||||
if (!$job.HasMoreData) {
|
||||
continue
|
||||
}
|
||||
try {
|
||||
$job | Receive-Job
|
||||
}
|
||||
catch {
|
||||
if ($job.State -ne "Failed") {
|
||||
Write-Output "failure receiving job data: ${_}"
|
||||
# just swallow the errors as it seems there are various reasons errors
|
||||
# may show up (perhaps no data currently, etc)
|
||||
#$job | fl
|
||||
#throw $_
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# spew any remaining job output to the console
|
||||
foreach ($job in Get-Job) {
|
||||
if ($job -eq $csi_grpc_proxy_job) {
|
||||
continue
|
||||
}
|
||||
try {
|
||||
$job | Receive-Job
|
||||
}
|
||||
catch {}
|
||||
}
|
||||
|
||||
# wait for good measure
|
||||
if ($csi_sanity_job) {
|
||||
Wait-Job -Job $csi_sanity_job
|
||||
}
|
||||
|
||||
#Get-Job | fl
|
||||
|
||||
$exit_code = 0
|
||||
|
||||
if (! $csi_sanity_job) {
|
||||
$exit_code = 1
|
||||
}
|
||||
|
||||
if ($csi_sanity_job -and $csi_sanity_job.State -eq "Failed") {
|
||||
$exit_code = 1
|
||||
}
|
||||
|
||||
# cleanup after ourselves
|
||||
Job-Cleanup
|
||||
Exit $exit_code
|
||||
|
|
@ -15,8 +15,8 @@ export PATH="/usr/local/lib/nodejs/bin:${PATH}"
|
|||
# install deps
|
||||
#npm i
|
||||
# install from artifacts
|
||||
if [[ -f "node_modules.tar.gz" ]];then
|
||||
tar -zxf node_modules.tar.gz
|
||||
if [[ -f "node_modules-linux-amd64.tar.gz" && ! -d "node_modules" ]];then
|
||||
tar -zxf node_modules-linux-amd64.tar.gz
|
||||
fi
|
||||
|
||||
# generate key for paths etc
|
||||
|
|
|
|||
|
|
@ -0,0 +1,10 @@
|
|||
driver: nfs-client
|
||||
instance_id:
|
||||
nfs:
|
||||
shareHost: ${SERVER_HOST}
|
||||
shareBasePath: "/mnt/tank/client/nfs/${CI_BUILD_KEY}"
|
||||
# shareHost:shareBasePath should be mounted at this location in the controller container
|
||||
controllerBasePath: "/mnt/client/nfs/${CI_BUILD_KEY}"
|
||||
dirPermissionsMode: "0777"
|
||||
dirPermissionsUser: 0
|
||||
dirPermissionsGroup: 0
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
driver: smb-client
|
||||
instance_id:
|
||||
smb:
|
||||
shareHost: ${SERVER_HOST}
|
||||
shareBasePath: "${SHARE_NAME}/${CI_BUILD_KEY}"
|
||||
# shareHost:shareBasePath should be mounted at this location in the controller container
|
||||
controllerBasePath: "/mnt/client/smb/${CI_BUILD_KEY}"
|
||||
dirPermissionsMode: "0777"
|
||||
dirPermissionsUser: 0
|
||||
dirPermissionsGroup: 0
|
||||
|
||||
node:
|
||||
mount:
|
||||
mount_flags: "username=smbroot,password=smbroot"
|
||||
|
|
@ -16,8 +16,8 @@ iscsi:
|
|||
targetPortal: ${SYNOLOGY_HOST}
|
||||
targetPortals: []
|
||||
baseiqn: "iqn.2000-01.com.synology:XpenoDsm62x."
|
||||
namePrefix: "csi-${CI_BUILD_KEY}-"
|
||||
nameSuffix: "-ci"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
|
||||
lunTemplate:
|
||||
# btrfs thin provisioning
|
||||
|
|
@ -0,0 +1,77 @@
|
|||
driver: synology-iscsi
|
||||
httpConnection:
|
||||
protocol: http
|
||||
host: ${SYNOLOGY_HOST}
|
||||
port: ${SYNOLOGY_PORT}
|
||||
username: ${SYNOLOGY_USERNAME}
|
||||
password: ${SYNOLOGY_PASSWORD}
|
||||
allowInsecure: true
|
||||
session: "democratic-csi-${CI_BUILD_KEY}"
|
||||
serialize: true
|
||||
|
||||
synology:
|
||||
volume: ${SYNOLOGY_VOLUME}
|
||||
|
||||
iscsi:
|
||||
targetPortal: ${SYNOLOGY_HOST}
|
||||
targetPortals: []
|
||||
baseiqn: "iqn.2000-01.com.synology:XpenoDsm62x."
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
|
||||
lunTemplate:
|
||||
# btrfs thin provisioning
|
||||
type: "BLUN"
|
||||
# tpws = Hardware-assisted zeroing
|
||||
# caw = Hardware-assisted locking
|
||||
# 3pc = Hardware-assisted data transfer
|
||||
# tpu = Space reclamation
|
||||
# can_snapshot = Snapshot
|
||||
#dev_attribs:
|
||||
#- dev_attrib: emulate_tpws
|
||||
# enable: 1
|
||||
#- dev_attrib: emulate_caw
|
||||
# enable: 1
|
||||
#- dev_attrib: emulate_3pc
|
||||
# enable: 1
|
||||
#- dev_attrib: emulate_tpu
|
||||
# enable: 0
|
||||
#- dev_attrib: can_snapshot
|
||||
# enable: 1
|
||||
|
||||
# btrfs thick provisioning
|
||||
# only zeroing and locking supported
|
||||
#type: "BLUN_THICK"
|
||||
# tpws = Hardware-assisted zeroing
|
||||
# caw = Hardware-assisted locking
|
||||
#dev_attribs:
|
||||
#- dev_attrib: emulate_tpws
|
||||
# enable: 1
|
||||
#- dev_attrib: emulate_caw
|
||||
# enable: 1
|
||||
|
||||
# ext4 thin provisioning, UI sends everything with enabled=0
|
||||
#type: "THIN"
|
||||
|
||||
# ext4 thin with advanced legacy features set
|
||||
# can only alter tpu (all others are set as enabled=1)
|
||||
#type: "ADV"
|
||||
#dev_attribs:
|
||||
#- dev_attrib: emulate_tpu
|
||||
# enable: 1
|
||||
|
||||
# ext4 thick
|
||||
# can only alter caw
|
||||
#type: "FILE"
|
||||
#dev_attribs:
|
||||
#- dev_attrib: emulate_caw
|
||||
# enable: 1
|
||||
|
||||
lunSnapshotTemplate:
|
||||
is_locked: true
|
||||
# https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot
|
||||
is_app_consistent: true
|
||||
|
||||
targetTemplate:
|
||||
auth_type: 0
|
||||
max_sessions: 0
|
||||
|
|
@ -26,7 +26,7 @@ zfs:
|
|||
iscsi:
|
||||
targetPortal: ${TRUENAS_HOST}
|
||||
interface: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
targetGroups:
|
||||
- targetGroupPortalGroup: 1
|
||||
|
|
@ -35,3 +35,10 @@ iscsi:
|
|||
targetGroupAuthGroup:
|
||||
# 0-100 (0 == ignore)
|
||||
extentAvailThreshold: 0
|
||||
|
||||
# overcome the 63 char limit for testing purposes only
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
idHash:
|
||||
strategy: crc16
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ zfs:
|
|||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
|
|
|||
|
|
@ -17,33 +17,29 @@ sshConnection:
|
|||
zfs:
|
||||
datasetProperties:
|
||||
# smb options
|
||||
#aclmode: restricted
|
||||
#casesensitivity: mixed
|
||||
aclmode: restricted
|
||||
aclinherit: passthrough
|
||||
acltype: nfsv4
|
||||
casesensitivity: insensitive
|
||||
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0770"
|
||||
datasetPermissionsUser: 1001
|
||||
datasetPermissionsGroup: 1001
|
||||
|
||||
# for smb with guest
|
||||
#datasetPermissionsUser: nobody
|
||||
#datasetPermissionsGroup: nobody
|
||||
|
||||
#datasetPermissionsGroup: root
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m u:kube:full_set:allow"
|
||||
datasetPermissionsAcls:
|
||||
- "-m g:builtin_users:full_set:fd:allow"
|
||||
- "-m group@:modify_set:fd:allow"
|
||||
- "-m owner@:full_set:fd:allow"
|
||||
|
||||
smb:
|
||||
shareHost: ${TRUENAS_HOST}
|
||||
#nameTemplate: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
shareAuxiliaryConfigurationTemplate: |
|
||||
#guest ok = yes
|
||||
|
|
@ -52,11 +48,21 @@ smb:
|
|||
shareAllowedHosts: []
|
||||
shareDeniedHosts: []
|
||||
#shareDefaultPermissions: true
|
||||
shareGuestOk: true
|
||||
shareGuestOk: false
|
||||
#shareGuestOnly: true
|
||||
#shareShowHiddenFiles: true
|
||||
shareRecycleBin: true
|
||||
shareRecycleBin: false
|
||||
shareBrowsable: false
|
||||
shareAccessBasedEnumeration: true
|
||||
shareTimeMachine: false
|
||||
#shareStorageTask:
|
||||
|
||||
node:
|
||||
mount:
|
||||
mount_flags: "username=smbroot,password=smbroot"
|
||||
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
idHash:
|
||||
strategy: crc16
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ zfs:
|
|||
iscsi:
|
||||
targetPortal: ${TRUENAS_HOST}
|
||||
interface: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
targetGroups:
|
||||
- targetGroupPortalGroup: 1
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ zfs:
|
|||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
|
|
|||
|
|
@ -17,33 +17,29 @@ sshConnection:
|
|||
zfs:
|
||||
datasetProperties:
|
||||
# smb options
|
||||
#aclmode: restricted
|
||||
#casesensitivity: mixed
|
||||
aclmode: restricted
|
||||
aclinherit: passthrough
|
||||
acltype: nfsv4
|
||||
casesensitivity: insensitive
|
||||
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0770"
|
||||
datasetPermissionsUser: 1001
|
||||
datasetPermissionsGroup: 1001
|
||||
|
||||
# for smb with guest
|
||||
#datasetPermissionsUser: nobody
|
||||
#datasetPermissionsGroup: nobody
|
||||
|
||||
#datasetPermissionsGroup: root
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m u:kube:full_set:allow"
|
||||
datasetPermissionsAcls:
|
||||
- "-m g:builtin_users:full_set:fd:allow"
|
||||
- "-m group@:modify_set:fd:allow"
|
||||
- "-m owner@:full_set:fd:allow"
|
||||
|
||||
smb:
|
||||
shareHost: ${TRUENAS_HOST}
|
||||
#nameTemplate: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
shareAuxiliaryConfigurationTemplate: |
|
||||
#guest ok = yes
|
||||
|
|
@ -52,11 +48,21 @@ smb:
|
|||
shareAllowedHosts: []
|
||||
shareDeniedHosts: []
|
||||
#shareDefaultPermissions: true
|
||||
shareGuestOk: true
|
||||
shareGuestOk: false
|
||||
#shareGuestOnly: true
|
||||
#shareShowHiddenFiles: true
|
||||
shareRecycleBin: true
|
||||
shareRecycleBin: false
|
||||
shareBrowsable: false
|
||||
shareAccessBasedEnumeration: true
|
||||
shareTimeMachine: false
|
||||
#shareStorageTask:
|
||||
|
||||
node:
|
||||
mount:
|
||||
mount_flags: "username=smbroot,password=smbroot"
|
||||
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
idHash:
|
||||
strategy: crc16
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ zfs:
|
|||
iscsi:
|
||||
targetPortal: ${TRUENAS_HOST}
|
||||
interface: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
targetGroups:
|
||||
- targetGroupPortalGroup: 1
|
||||
|
|
|
|||
|
|
@ -13,7 +13,7 @@ zfs:
|
|||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
|
|
|||
|
|
@ -9,35 +9,19 @@ httpConnection:
|
|||
password: ${TRUENAS_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetProperties:
|
||||
# smb options
|
||||
#aclmode: restricted
|
||||
#casesensitivity: mixed
|
||||
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0770"
|
||||
datasetPermissionsUser: 1001
|
||||
datasetPermissionsGroup: 1001
|
||||
|
||||
# for smb with guest
|
||||
#datasetPermissionsUser: nobody
|
||||
#datasetPermissionsGroup: nobody
|
||||
|
||||
#datasetPermissionsGroup: root
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m u:kube:full_set:allow"
|
||||
|
||||
smb:
|
||||
shareHost: ${TRUENAS_HOST}
|
||||
#nameTemplate: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
shareAuxiliaryConfigurationTemplate: |
|
||||
#guest ok = yes
|
||||
|
|
@ -46,11 +30,21 @@ smb:
|
|||
shareAllowedHosts: []
|
||||
shareDeniedHosts: []
|
||||
#shareDefaultPermissions: true
|
||||
shareGuestOk: true
|
||||
shareGuestOk: false
|
||||
#shareGuestOnly: true
|
||||
#shareShowHiddenFiles: true
|
||||
shareRecycleBin: true
|
||||
shareRecycleBin: false
|
||||
shareBrowsable: false
|
||||
shareAccessBasedEnumeration: true
|
||||
shareTimeMachine: false
|
||||
#shareStorageTask:
|
||||
|
||||
node:
|
||||
mount:
|
||||
mount_flags: "username=smbroot,password=smbroot"
|
||||
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
idHash:
|
||||
strategy: crc16
|
||||
|
|
|
|||
|
|
@ -0,0 +1,31 @@
|
|||
driver: zfs-generic-iscsi
|
||||
|
||||
sshConnection:
|
||||
host: ${SERVER_HOST}
|
||||
port: 22
|
||||
username: ${SERVER_USERNAME}
|
||||
password: ${SERVER_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
zvolCompression:
|
||||
zvolDedup:
|
||||
zvolEnableReservation: false
|
||||
zvolBlocksize:
|
||||
|
||||
iscsi:
|
||||
targetPortal: ${SERVER_HOST}
|
||||
interface: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
nameSuffix: ""
|
||||
shareStrategy: "targetCli"
|
||||
shareStrategyTargetCli:
|
||||
basename: "iqn.2003-01.org.linux-iscsi.ubuntu-19.x8664"
|
||||
tpg:
|
||||
attributes:
|
||||
authentication: 0
|
||||
generate_node_acls: 1
|
||||
cache_dynamic_acls: 1
|
||||
demo_mode_write_protect: 0
|
||||
|
|
@ -0,0 +1,40 @@
|
|||
driver: zfs-generic-smb
|
||||
|
||||
sshConnection:
|
||||
host: ${SERVER_HOST}
|
||||
port: 22
|
||||
username: ${SERVER_USERNAME}
|
||||
password: ${SERVER_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetProperties:
|
||||
#aclmode: restricted
|
||||
#aclinherit: passthrough
|
||||
#acltype: nfsv4
|
||||
casesensitivity: insensitive
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0770"
|
||||
datasetPermissionsUser: smbroot
|
||||
datasetPermissionsGroup: smbroot
|
||||
|
||||
smb:
|
||||
shareHost: ${SERVER_HOST}
|
||||
shareStrategy: "setDatasetProperties"
|
||||
shareStrategySetDatasetProperties:
|
||||
properties:
|
||||
sharesmb: "on"
|
||||
|
||||
node:
|
||||
mount:
|
||||
mount_flags: "username=smbroot,password=smbroot"
|
||||
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
idHash:
|
||||
strategy: crc16
|
||||
|
|
@ -18,7 +18,7 @@ zfs:
|
|||
iscsi:
|
||||
targetPortal: ${SERVER_HOST}
|
||||
interface: ""
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}"
|
||||
namePrefix: "csi-ci-${CI_BUILD_KEY}-"
|
||||
nameSuffix: ""
|
||||
shareStrategy: "targetCli"
|
||||
shareStrategyTargetCli:
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ zfs:
|
|||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
|
@ -21,4 +21,5 @@ nfs:
|
|||
shareStrategy: "setDatasetProperties"
|
||||
shareStrategySetDatasetProperties:
|
||||
properties:
|
||||
sharenfs: "on"
|
||||
#sharenfs: "on"
|
||||
sharenfs: "rw,no_subtree_check,no_root_squash"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,40 @@
|
|||
driver: zfs-generic-smb
|
||||
|
||||
sshConnection:
|
||||
host: ${SERVER_HOST}
|
||||
port: 22
|
||||
username: ${SERVER_USERNAME}
|
||||
password: ${SERVER_PASSWORD}
|
||||
|
||||
zfs:
|
||||
datasetParentName: tank/ci/${CI_BUILD_KEY}/v
|
||||
detachedSnapshotsDatasetParentName: tank/ci/${CI_BUILD_KEY}/s
|
||||
|
||||
datasetProperties:
|
||||
#aclmode: restricted
|
||||
#aclinherit: passthrough
|
||||
#acltype: nfsv4
|
||||
casesensitivity: insensitive
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0770"
|
||||
datasetPermissionsUser: smbroot
|
||||
datasetPermissionsGroup: smbroot
|
||||
|
||||
smb:
|
||||
shareHost: ${SERVER_HOST}
|
||||
shareStrategy: "setDatasetProperties"
|
||||
shareStrategySetDatasetProperties:
|
||||
properties:
|
||||
sharesmb: "on"
|
||||
|
||||
node:
|
||||
mount:
|
||||
mount_flags: "username=smbroot,password=smbroot"
|
||||
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
idHash:
|
||||
strategy: crc16
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1";
|
||||
|
||||
service Disk {
|
||||
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
|
||||
// disk devices enumerated by the host.
|
||||
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
|
||||
|
||||
// PartitionDisk initializes and partitions a disk device with the GPT partition style
|
||||
// (if the disk has not been partitioned already) and returns the resulting volume device ID.
|
||||
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
|
||||
|
||||
// Rescan refreshes the host's storage cache.
|
||||
rpc Rescan(RescanRequest) returns (RescanResponse) {}
|
||||
|
||||
// ListDiskIDs returns a map of DiskID objects where the key is the disk number.
|
||||
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
|
||||
|
||||
// GetDiskStats returns the stats of a disk (currently it returns the disk size).
|
||||
rpc GetDiskStats(GetDiskStatsRequest) returns (GetDiskStatsResponse) {}
|
||||
|
||||
// SetDiskState sets the offline/online state of a disk.
|
||||
rpc SetDiskState(SetDiskStateRequest) returns (SetDiskStateResponse) {}
|
||||
|
||||
// GetDiskState gets the offline/online state of a disk.
|
||||
rpc GetDiskState(GetDiskStateRequest) returns (GetDiskStateResponse) {}
|
||||
}
|
||||
|
||||
message ListDiskLocationsRequest {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message DiskLocation {
|
||||
string Adapter = 1;
|
||||
string Bus = 2;
|
||||
string Target = 3;
|
||||
string LUNID = 4;
|
||||
}
|
||||
|
||||
message ListDiskLocationsResponse {
|
||||
// Map of disk number and <adapter, bus, target, lun ID> associated with each disk device.
|
||||
map <uint32, DiskLocation> disk_locations = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskRequest {
|
||||
// Disk device number of the disk to partition.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RescanRequest {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RescanResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message ListDiskIDsRequest {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message DiskIDs {
|
||||
// The disk page83 id.
|
||||
string page83 = 1;
|
||||
// The disk serial number.
|
||||
string serial_number = 2;
|
||||
}
|
||||
|
||||
message ListDiskIDsResponse {
|
||||
// Map of disk numbers and disk identifiers associated with each disk device.
|
||||
map <uint32, DiskIDs> diskIDs = 1; // the case is intentional for protoc to generate the field as DiskIDs
|
||||
}
|
||||
|
||||
message GetDiskStatsRequest {
|
||||
// Disk device number of the disk to get the stats from.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetDiskStatsResponse {
|
||||
// Total size of the volume.
|
||||
int64 total_bytes = 1;
|
||||
}
|
||||
|
||||
message SetDiskStateRequest {
|
||||
// Disk device number of the disk.
|
||||
uint32 disk_number = 1;
|
||||
|
||||
// Online state to set for the disk. true for online, false for offline.
|
||||
bool is_online = 2;
|
||||
}
|
||||
|
||||
message SetDiskStateResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message GetDiskStateRequest {
|
||||
// Disk device number of the disk.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetDiskStateResponse {
|
||||
// Online state of the disk. true for online, false for offline.
|
||||
bool is_online = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,62 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha1;
|
||||
|
||||
service Disk {
|
||||
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
|
||||
// disk devices enumerated by the host
|
||||
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
|
||||
|
||||
// PartitionDisk initializes and partitions a disk device (if the disk has not
|
||||
// been partitioned already) and returns the resulting volume device ID
|
||||
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
|
||||
|
||||
// Rescan refreshes the host's storage cache
|
||||
rpc Rescan(RescanRequest) returns (RescanResponse) {}
|
||||
|
||||
// GetDiskNumberByName returns disk number based on the passing disk name information
|
||||
rpc GetDiskNumberByName(GetDiskNumberByNameRequest) returns (GetDiskNumberByNameResponse) {}
|
||||
}
|
||||
|
||||
message ListDiskLocationsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiskLocation {
|
||||
string Adapter = 1;
|
||||
string Bus = 2;
|
||||
string Target = 3;
|
||||
string LUNID = 4;
|
||||
}
|
||||
|
||||
message ListDiskLocationsResponse {
|
||||
// Map of disk device IDs and <adapter, bus, target, lun ID> associated with each disk device
|
||||
map <string, DiskLocation> disk_locations = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskRequest {
|
||||
// Disk device ID of the disk to partition
|
||||
string diskID = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message RescanRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message RescanResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message GetDiskNumberByNameRequest {
|
||||
// Disk ID
|
||||
string disk_name = 1;
|
||||
}
|
||||
|
||||
message GetDiskNumberByNameResponse {
|
||||
// Disk number
|
||||
string disk_number = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,81 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1beta1";
|
||||
|
||||
service Disk {
|
||||
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
|
||||
// disk devices enumerated by the host
|
||||
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
|
||||
|
||||
// PartitionDisk initializes and partitions a disk device (if the disk has not
|
||||
// been partitioned already) and returns the resulting volume device ID
|
||||
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
|
||||
|
||||
// Rescan refreshes the host's storage cache
|
||||
rpc Rescan(RescanRequest) returns (RescanResponse) {}
|
||||
|
||||
// ListDiskIDs returns a map of DiskID objects where the key is the disk number
|
||||
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
|
||||
|
||||
// DiskStats returns the stats for the disk
|
||||
rpc DiskStats(DiskStatsRequest) returns (DiskStatsResponse) {}
|
||||
}
|
||||
|
||||
message ListDiskLocationsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiskLocation {
|
||||
string Adapter = 1;
|
||||
string Bus = 2;
|
||||
string Target = 3;
|
||||
string LUNID = 4;
|
||||
}
|
||||
|
||||
message ListDiskLocationsResponse {
|
||||
// Map of disk device IDs and <adapter, bus, target, lun ID> associated with each disk device
|
||||
map <string, DiskLocation> disk_locations = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskRequest {
|
||||
// Disk device ID of the disk to partition
|
||||
string diskID = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message RescanRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message RescanResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ListDiskIDsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiskIDs {
|
||||
// Map of Disk ID types and Disk ID values
|
||||
map <string, string> identifiers = 1;
|
||||
}
|
||||
|
||||
message ListDiskIDsResponse {
|
||||
// Map of disk device numbers and IDs <page83> associated with each disk device
|
||||
map <string, DiskIDs> diskIDs = 1;
|
||||
}
|
||||
|
||||
message DiskStatsRequest {
|
||||
// Disk device ID of the disk to get the size from
|
||||
string diskID = 1;
|
||||
}
|
||||
|
||||
message DiskStatsResponse {
|
||||
//Total size of the volume
|
||||
int64 diskSize = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,109 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta2;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1beta2";
|
||||
|
||||
service Disk {
|
||||
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
|
||||
// disk devices enumerated by the host
|
||||
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
|
||||
|
||||
// PartitionDisk initializes and partitions a disk device (if the disk has not
|
||||
// been partitioned already) and returns the resulting volume device ID
|
||||
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
|
||||
|
||||
// Rescan refreshes the host's storage cache
|
||||
rpc Rescan(RescanRequest) returns (RescanResponse) {}
|
||||
|
||||
// ListDiskIDs returns a map of DiskID objects where the key is the disk number
|
||||
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
|
||||
|
||||
// DiskStats returns the stats for the disk
|
||||
rpc DiskStats(DiskStatsRequest) returns (DiskStatsResponse) {}
|
||||
|
||||
// SetAttachState sets the offline/online state of a disk
|
||||
rpc SetAttachState(SetAttachStateRequest) returns (SetAttachStateResponse) {}
|
||||
|
||||
// GetAttachState gets the offline/online state of a disk
|
||||
rpc GetAttachState(GetAttachStateRequest) returns (GetAttachStateResponse) {}
|
||||
}
|
||||
|
||||
message ListDiskLocationsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiskLocation {
|
||||
string Adapter = 1;
|
||||
string Bus = 2;
|
||||
string Target = 3;
|
||||
string LUNID = 4;
|
||||
}
|
||||
|
||||
message ListDiskLocationsResponse {
|
||||
// Map of disk device IDs and <adapter, bus, target, lun ID> associated with each disk device
|
||||
map <string, DiskLocation> disk_locations = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskRequest {
|
||||
// Disk device ID of the disk to partition
|
||||
string diskID = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message RescanRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message RescanResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ListDiskIDsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiskIDs {
|
||||
// Map of Disk ID types and Disk ID values
|
||||
map <string, string> identifiers = 1;
|
||||
}
|
||||
|
||||
message ListDiskIDsResponse {
|
||||
// Map of disk device numbers and IDs <page83> associated with each disk device
|
||||
map <string, DiskIDs> diskIDs = 1;
|
||||
}
|
||||
|
||||
message DiskStatsRequest {
|
||||
// Disk device ID of the disk to get the size from
|
||||
string diskID = 1;
|
||||
}
|
||||
|
||||
message DiskStatsResponse {
|
||||
//Total size of the volume
|
||||
int64 diskSize = 1;
|
||||
}
|
||||
|
||||
message SetAttachStateRequest {
|
||||
// Disk device ID (number) of the disk which state will change
|
||||
string diskID = 1;
|
||||
|
||||
// Online state to set for the disk. true for online, false for offline
|
||||
bool isOnline = 2;
|
||||
}
|
||||
|
||||
message SetAttachStateResponse {
|
||||
}
|
||||
|
||||
message GetAttachStateRequest {
|
||||
// Disk device ID (number) of the disk
|
||||
string diskID = 1;
|
||||
}
|
||||
|
||||
message GetAttachStateResponse {
|
||||
// Online state of the disk. true for online, false for offline
|
||||
bool isOnline = 1;
|
||||
}
|
||||
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta3;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/disk/v1beta3";
|
||||
|
||||
service Disk {
|
||||
// ListDiskLocations returns locations <Adapter, Bus, Target, LUN ID> of all
|
||||
// disk devices enumerated by the host.
|
||||
rpc ListDiskLocations(ListDiskLocationsRequest) returns (ListDiskLocationsResponse) {}
|
||||
|
||||
// PartitionDisk initializes and partitions a disk device with the GPT partition style
|
||||
// (if the disk has not been partitioned already) and returns the resulting volume device ID.
|
||||
rpc PartitionDisk(PartitionDiskRequest) returns (PartitionDiskResponse) {}
|
||||
|
||||
// Rescan refreshes the host's storage cache.
|
||||
rpc Rescan(RescanRequest) returns (RescanResponse) {}
|
||||
|
||||
// ListDiskIDs returns a map of DiskID objects where the key is the disk number.
|
||||
rpc ListDiskIDs(ListDiskIDsRequest) returns (ListDiskIDsResponse) {}
|
||||
|
||||
// GetDiskStats returns the stats of a disk (currently it returns the disk size).
|
||||
rpc GetDiskStats(GetDiskStatsRequest) returns (GetDiskStatsResponse) {}
|
||||
|
||||
// SetDiskState sets the offline/online state of a disk.
|
||||
rpc SetDiskState(SetDiskStateRequest) returns (SetDiskStateResponse) {}
|
||||
|
||||
// GetDiskState gets the offline/online state of a disk.
|
||||
rpc GetDiskState(GetDiskStateRequest) returns (GetDiskStateResponse) {}
|
||||
}
|
||||
|
||||
message ListDiskLocationsRequest {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message DiskLocation {
|
||||
string Adapter = 1;
|
||||
string Bus = 2;
|
||||
string Target = 3;
|
||||
string LUNID = 4;
|
||||
}
|
||||
|
||||
message ListDiskLocationsResponse {
|
||||
// Map of disk number and <adapter, bus, target, lun ID> associated with each disk device.
|
||||
map <uint32, DiskLocation> disk_locations = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskRequest {
|
||||
// Disk device number of the disk to partition.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message PartitionDiskResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RescanRequest {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RescanResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message ListDiskIDsRequest {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message DiskIDs {
|
||||
// The disk page83 id.
|
||||
string page83 = 1;
|
||||
// The disk serial number.
|
||||
string serial_number = 2;
|
||||
}
|
||||
|
||||
message ListDiskIDsResponse {
|
||||
// Map of disk numbers and disk identifiers associated with each disk device.
|
||||
map <uint32, DiskIDs> diskIDs = 1; // the case is intentional for protoc to generate the field as DiskIDs
|
||||
}
|
||||
|
||||
message GetDiskStatsRequest {
|
||||
// Disk device number of the disk to get the stats from.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetDiskStatsResponse {
|
||||
// Total size of the volume.
|
||||
int64 total_bytes = 1;
|
||||
}
|
||||
|
||||
message SetDiskStateRequest {
|
||||
// Disk device number of the disk.
|
||||
uint32 disk_number = 1;
|
||||
|
||||
// Online state to set for the disk. true for online, false for offline.
|
||||
bool is_online = 2;
|
||||
}
|
||||
|
||||
message SetDiskStateResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message GetDiskStateRequest {
|
||||
// Disk device number of the disk.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetDiskStateResponse {
|
||||
// Online state of the disk. true for online, false for offline.
|
||||
bool is_online = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,17 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package api;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api";
|
||||
|
||||
// CommandError details errors yielded by cmdlet calls.
|
||||
message CmdletError {
|
||||
// Name of the cmdlet that errored out.
|
||||
string cmdlet_name = 1;
|
||||
|
||||
// Error code that got returned.
|
||||
uint32 code = 2;
|
||||
|
||||
// Human-readable error message - can be empty.
|
||||
string message = 3;
|
||||
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v1";
|
||||
|
||||
service Filesystem {
|
||||
// PathExists checks if the requested path exists in the host filesystem.
|
||||
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
|
||||
|
||||
// Mkdir creates a directory at the requested path in the host filesystem.
|
||||
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
|
||||
|
||||
// Rmdir removes the directory at the requested path in the host filesystem.
|
||||
// This may be used for unlinking a symlink created through CreateSymlink.
|
||||
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
|
||||
|
||||
// CreateSymlink creates a symbolic link called target_path that points to source_path
|
||||
// in the host filesystem (target_path is the name of the symbolic link created,
|
||||
// source_path is the existing path).
|
||||
rpc CreateSymlink(CreateSymlinkRequest) returns (CreateSymlinkResponse) {}
|
||||
|
||||
// IsSymlink checks if a given path is a symlink.
|
||||
rpc IsSymlink(IsSymlinkRequest) returns (IsSymlinkResponse) {}
|
||||
}
|
||||
|
||||
message PathExistsRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message PathExistsResponse {
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool exists = 1;
|
||||
}
|
||||
|
||||
message MkdirRequest {
|
||||
// The path to create in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
// Non-existent parent directories in the path will be automatically created.
|
||||
// Directories will be created with Read and Write privileges of the Windows
|
||||
// User account under which csi-proxy is started (typically LocalSystem).
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// The path parameter cannot already exist in the host's filesystem.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message MkdirResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RmdirRequest {
|
||||
// The path to remove in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Path cannot be a file of type symlink.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Force remove all contents under path (if any).
|
||||
bool force = 2;
|
||||
}
|
||||
|
||||
message RmdirResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message CreateSymlinkRequest {
|
||||
// The path of the existing directory to be linked.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs needs to match the paths specified as
|
||||
// kubelet-csi-plugins-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// source_path cannot already exist in the host filesystem.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string source_path = 1;
|
||||
|
||||
// Target path is the location of the new directory entry to be created in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs to match the paths specified as
|
||||
// kubelet-pod-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// target_path needs to exist as a directory in the host that is empty.
|
||||
// target_path cannot be a symbolic link.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message CreateSymlinkResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message IsSymlinkRequest {
|
||||
// The path whose existence as a symlink we want to check in the host's filesystem.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message IsSymlinkResponse {
|
||||
// Indicates whether the path in IsSymlinkRequest is a symlink.
|
||||
bool is_symlink = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,168 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha1;
|
||||
|
||||
service Filesystem {
|
||||
// PathExists checks if the requested path exists in the host's filesystem
|
||||
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
|
||||
|
||||
// Mkdir creates a directory at the requested path in the host's filesystem
|
||||
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
|
||||
|
||||
// Rmdir removes the directory at the requested path in the host's filesystem.
|
||||
// This may be used for unlinking a symlink created through LinkPath
|
||||
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
|
||||
|
||||
// LinkPath creates a local directory symbolic link between a source path
|
||||
// and target path in the host's filesystem
|
||||
rpc LinkPath(LinkPathRequest) returns (LinkPathResponse) {}
|
||||
|
||||
//IsMountPoint checks if a given path is mount or not
|
||||
rpc IsMountPoint(IsMountPointRequest) returns (IsMountPointResponse) {}
|
||||
}
|
||||
|
||||
// Context of the paths used for path prefix validation
|
||||
enum PathContext {
|
||||
// Indicates the kubelet-csi-plugins-path parameter of csi-proxy be used as
|
||||
// the path context. This may be used while handling NodeStageVolume where
|
||||
// a volume may need to be mounted at a plugin-specific path like:
|
||||
// kubelet\plugins\kubernetes.io\csi\pv\<pv-name>\globalmount
|
||||
PLUGIN = 0;
|
||||
// Indicates the kubelet-pod-path parameter of csi-proxy be used as the path
|
||||
// context. This may be used while handling NodePublishVolume where a staged
|
||||
// volume may be need to be symlinked to a pod-specific path like:
|
||||
// kubelet\pods\<pod-uuid>\volumes\kubernetes.io~csi\<pvc-name>\mount
|
||||
POD = 1;
|
||||
}
|
||||
|
||||
message PathExistsRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
|
||||
// Context of the path parameter.
|
||||
// This is used to validate prefix for absolute paths passed
|
||||
PathContext context = 2;
|
||||
}
|
||||
|
||||
message PathExistsResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool exists = 2;
|
||||
}
|
||||
|
||||
message MkdirRequest {
|
||||
// The path to create in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
// Non-existent parent directories in the path will be automatically created.
|
||||
// Directories will be created with Read and Write privileges of the Windows
|
||||
// User account under which csi-proxy is started (typically LocalSystem).
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// The path parameter cannot already exist in the host's filesystem.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Context of the path parameter.
|
||||
// This is used to validate prefix for absolute paths passed
|
||||
PathContext context = 2;
|
||||
}
|
||||
|
||||
message MkdirResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
message RmdirRequest {
|
||||
// The path to remove in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Path cannot be a file of type symlink.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Context of the path parameter.
|
||||
// This is used to validate prefix for absolute paths passed
|
||||
PathContext context = 2;
|
||||
|
||||
// Force remove all contents under path (if any).
|
||||
bool force = 3;
|
||||
}
|
||||
|
||||
message RmdirResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
message LinkPathRequest {
|
||||
// The path where the symlink is created in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs needs to match the paths specified as
|
||||
// kubelet-csi-plugins-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// source_path cannot already exist in the host filesystem.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string source_path = 1;
|
||||
|
||||
// Target path in the host's filesystem used for the symlink creation.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs to match the paths specified as
|
||||
// kubelet-pod-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// target_path needs to exist as a directory in the host that is empty.
|
||||
// target_path cannot be a symbolic link.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message LinkPathResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
message IsMountPointRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message IsMountPointResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool is_mount_point = 2;
|
||||
}
|
||||
|
|
@ -0,0 +1,168 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta1;
|
||||
|
||||
service Filesystem {
|
||||
// PathExists checks if the requested path exists in the host's filesystem
|
||||
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
|
||||
|
||||
// Mkdir creates a directory at the requested path in the host's filesystem
|
||||
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
|
||||
|
||||
// Rmdir removes the directory at the requested path in the host's filesystem.
|
||||
// This may be used for unlinking a symlink created through LinkPath
|
||||
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
|
||||
|
||||
// LinkPath creates a local directory symbolic link between a source path
|
||||
// and target path in the host's filesystem
|
||||
rpc LinkPath(LinkPathRequest) returns (LinkPathResponse) {}
|
||||
|
||||
//IsMountPoint checks if a given path is mount or not
|
||||
rpc IsMountPoint(IsMountPointRequest) returns (IsMountPointResponse) {}
|
||||
}
|
||||
|
||||
// Context of the paths used for path prefix validation
|
||||
enum PathContext {
|
||||
// Indicates the kubelet-csi-plugins-path parameter of csi-proxy be used as
|
||||
// the path context. This may be used while handling NodeStageVolume where
|
||||
// a volume may need to be mounted at a plugin-specific path like:
|
||||
// kubelet\plugins\kubernetes.io\csi\pv\<pv-name>\globalmount
|
||||
PLUGIN = 0;
|
||||
// Indicates the kubelet-pod-path parameter of csi-proxy be used as the path
|
||||
// context. This may be used while handling NodePublishVolume where a staged
|
||||
// volume may be need to be symlinked to a pod-specific path like:
|
||||
// kubelet\pods\<pod-uuid>\volumes\kubernetes.io~csi\<pvc-name>\mount
|
||||
POD = 1;
|
||||
}
|
||||
|
||||
message PathExistsRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
|
||||
// Context of the path parameter.
|
||||
// This is used to validate prefix for absolute paths passed
|
||||
PathContext context = 2;
|
||||
}
|
||||
|
||||
message PathExistsResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool exists = 2;
|
||||
}
|
||||
|
||||
message MkdirRequest {
|
||||
// The path to create in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
// Non-existent parent directories in the path will be automatically created.
|
||||
// Directories will be created with Read and Write privileges of the Windows
|
||||
// User account under which csi-proxy is started (typically LocalSystem).
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// The path parameter cannot already exist in the host's filesystem.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Context of the path parameter.
|
||||
// This is used to validate prefix for absolute paths passed
|
||||
PathContext context = 2;
|
||||
}
|
||||
|
||||
message MkdirResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
message RmdirRequest {
|
||||
// The path to remove in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Path cannot be a file of type symlink.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Context of the path parameter.
|
||||
// This is used to validate prefix for absolute paths passed
|
||||
PathContext context = 2;
|
||||
|
||||
// Force remove all contents under path (if any).
|
||||
bool force = 3;
|
||||
}
|
||||
|
||||
message RmdirResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
message LinkPathRequest {
|
||||
// The path where the symlink is created in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs needs to match the paths specified as
|
||||
// kubelet-csi-plugins-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// source_path cannot already exist in the host filesystem.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string source_path = 1;
|
||||
|
||||
// Target path in the host's filesystem used for the symlink creation.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs to match the paths specified as
|
||||
// kubelet-pod-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// target_path needs to exist as a directory in the host that is empty.
|
||||
// target_path cannot be a symbolic link.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message LinkPathResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
message IsMountPointRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message IsMountPointResponse {
|
||||
// Error message if any. Empty string indicates success
|
||||
string error = 1;
|
||||
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool is_mount_point = 2;
|
||||
}
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta2;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v1beta2";
|
||||
|
||||
service Filesystem {
|
||||
// PathExists checks if the requested path exists in the host filesystem.
|
||||
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
|
||||
|
||||
// Mkdir creates a directory at the requested path in the host filesystem.
|
||||
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
|
||||
|
||||
// Rmdir removes the directory at the requested path in the host filesystem.
|
||||
// This may be used for unlinking a symlink created through CreateSymlink.
|
||||
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
|
||||
|
||||
// CreateSymlink creates a symbolic link called target_path that points to source_path
|
||||
// in the host filesystem (target_path is the name of the symbolic link created,
|
||||
// source_path is the existing path).
|
||||
rpc CreateSymlink(CreateSymlinkRequest) returns (CreateSymlinkResponse) {}
|
||||
|
||||
// IsSymlink checks if a given path is a symlink.
|
||||
rpc IsSymlink(IsSymlinkRequest) returns (IsSymlinkResponse) {}
|
||||
}
|
||||
|
||||
message PathExistsRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message PathExistsResponse {
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool exists = 1;
|
||||
}
|
||||
|
||||
message MkdirRequest {
|
||||
// The path to create in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
// Non-existent parent directories in the path will be automatically created.
|
||||
// Directories will be created with Read and Write privileges of the Windows
|
||||
// User account under which csi-proxy is started (typically LocalSystem).
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// The path parameter cannot already exist in the host's filesystem.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message MkdirResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RmdirRequest {
|
||||
// The path to remove in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Path cannot be a file of type symlink.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Force remove all contents under path (if any).
|
||||
bool force = 2;
|
||||
}
|
||||
|
||||
message RmdirResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message CreateSymlinkRequest {
|
||||
// The path of the existing directory to be linked.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs needs to match the paths specified as
|
||||
// kubelet-csi-plugins-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// source_path cannot already exist in the host filesystem.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string source_path = 1;
|
||||
|
||||
// Target path is the location of the new directory entry to be created in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs to match the paths specified as
|
||||
// kubelet-pod-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// target_path needs to exist as a directory in the host that is empty.
|
||||
// target_path cannot be a symbolic link.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message CreateSymlinkResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message IsSymlinkRequest {
|
||||
// The path whose existence as a symlink we want to check in the host's filesystem.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message IsSymlinkResponse {
|
||||
// Indicates whether the path in IsSymlinkRequest is a symlink.
|
||||
bool is_symlink = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,163 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v2alpha1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/filesystem/v2alpha1";
|
||||
|
||||
service Filesystem {
|
||||
// PathExists checks if the requested path exists in the host filesystem.
|
||||
rpc PathExists(PathExistsRequest) returns (PathExistsResponse) {}
|
||||
|
||||
// Mkdir creates a directory at the requested path in the host filesystem.
|
||||
rpc Mkdir(MkdirRequest) returns (MkdirResponse) {}
|
||||
|
||||
// Rmdir removes the directory at the requested path in the host filesystem.
|
||||
// This may be used for unlinking a symlink created through CreateSymlink.
|
||||
rpc Rmdir(RmdirRequest) returns (RmdirResponse) {}
|
||||
|
||||
// RmdirContents removes the contents of a directory in the host filesystem.
|
||||
// Unlike Rmdir it won't delete the requested path, it'll only delete its contents.
|
||||
rpc RmdirContents(RmdirContentsRequest) returns (RmdirContentsResponse) {}
|
||||
|
||||
// CreateSymlink creates a symbolic link called target_path that points to source_path
|
||||
// in the host filesystem (target_path is the name of the symbolic link created,
|
||||
// source_path is the existing path).
|
||||
rpc CreateSymlink(CreateSymlinkRequest) returns (CreateSymlinkResponse) {}
|
||||
|
||||
// IsSymlink checks if a given path is a symlink.
|
||||
rpc IsSymlink(IsSymlinkRequest) returns (IsSymlinkResponse) {}
|
||||
}
|
||||
|
||||
message PathExistsRequest {
|
||||
// The path whose existence we want to check in the host's filesystem
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message PathExistsResponse {
|
||||
// Indicates whether the path in PathExistsRequest exists in the host's filesystem
|
||||
bool exists = 1;
|
||||
}
|
||||
|
||||
message MkdirRequest {
|
||||
// The path to create in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
// Non-existent parent directories in the path will be automatically created.
|
||||
// Directories will be created with Read and Write privileges of the Windows
|
||||
// User account under which csi-proxy is started (typically LocalSystem).
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// The path parameter cannot already exist in the host's filesystem.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message MkdirResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RmdirRequest {
|
||||
// The path to remove in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Path cannot be a file of type symlink.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
|
||||
// Force remove all contents under path (if any).
|
||||
bool force = 2;
|
||||
}
|
||||
|
||||
message RmdirResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message RmdirContentsRequest {
|
||||
// The path whose contents will be removed in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// Depending on the context parameter of this function, the path prefix needs
|
||||
// to match the paths specified either as kubelet-csi-plugins-path
|
||||
// or as kubelet-pod-path parameters of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// Path cannot be a file of type symlink.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message RmdirContentsResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message CreateSymlinkRequest {
|
||||
// The path of the existing directory to be linked.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs needs to match the paths specified as
|
||||
// kubelet-csi-plugins-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// source_path cannot already exist in the host filesystem.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string source_path = 1;
|
||||
|
||||
// Target path is the location of the new directory entry to be created in the host's filesystem.
|
||||
// All special characters allowed by Windows in path names will be allowed
|
||||
// except for restrictions noted below. For details, please check:
|
||||
// https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file
|
||||
//
|
||||
// Restrictions:
|
||||
// Only absolute path (indicated by a drive letter prefix: e.g. "C:\") is accepted.
|
||||
// The path prefix needs to match the paths specified as
|
||||
// kubelet-pod-path parameter of csi-proxy.
|
||||
// UNC paths of the form "\\server\share\path\file" are not allowed.
|
||||
// All directory separators need to be backslash character: "\".
|
||||
// Characters: .. / : | ? * in the path are not allowed.
|
||||
// target_path needs to exist as an empty directory in the host.
|
||||
// target_path cannot be a symbolic link.
|
||||
// Maximum path length will be capped to 260 characters.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message CreateSymlinkResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message IsSymlinkRequest {
|
||||
// The path whose existence as a symlink we want to check in the host's filesystem.
|
||||
string path = 1;
|
||||
}
|
||||
|
||||
message IsSymlinkResponse {
|
||||
// Indicates whether the path in IsSymlinkRequest is a symlink.
|
||||
bool is_symlink = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,153 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/iscsi/v1alpha1";
|
||||
|
||||
service Iscsi {
|
||||
// AddTargetPortal registers an iSCSI target network address for later
|
||||
// discovery.
|
||||
// AddTargetPortal currently does not support selecting different NICs or
|
||||
// a different iSCSI initiator (e.g. a hardware initiator). This means that
|
||||
// Windows will select the initiator NIC and instance on its own.
|
||||
rpc AddTargetPortal(AddTargetPortalRequest)
|
||||
returns (AddTargetPortalResponse) {}
|
||||
|
||||
// DiscoverTargetPortal initiates discovery on an iSCSI target network address
|
||||
// and returns discovered IQNs.
|
||||
rpc DiscoverTargetPortal(DiscoverTargetPortalRequest)
|
||||
returns (DiscoverTargetPortalResponse) {}
|
||||
|
||||
// RemoveTargetPortal removes an iSCSI target network address registration.
|
||||
rpc RemoveTargetPortal(RemoveTargetPortalRequest)
|
||||
returns (RemoveTargetPortalResponse) {}
|
||||
|
||||
// ListTargetPortals lists all currently registered iSCSI target network
|
||||
// addresses.
|
||||
rpc ListTargetPortals(ListTargetPortalsRequest)
|
||||
returns (ListTargetPortalsResponse) {}
|
||||
|
||||
// ConnectTarget connects to an iSCSI Target
|
||||
rpc ConnectTarget(ConnectTargetRequest) returns (ConnectTargetResponse) {}
|
||||
|
||||
// DisconnectTarget disconnects from an iSCSI Target
|
||||
rpc DisconnectTarget(DisconnectTargetRequest)
|
||||
returns (DisconnectTargetResponse) {}
|
||||
|
||||
// GetTargetDisks returns the disk addresses that correspond to an iSCSI
|
||||
// target
|
||||
rpc GetTargetDisks(GetTargetDisksRequest) returns (GetTargetDisksResponse) {}
|
||||
}
|
||||
|
||||
// TargetPortal is an address and port pair for a specific iSCSI storage
|
||||
// target.
|
||||
message TargetPortal {
|
||||
// iSCSI Target (server) address
|
||||
string target_address = 1;
|
||||
|
||||
// iSCSI Target port (default iSCSI port is 3260)
|
||||
uint32 target_port = 2;
|
||||
}
|
||||
|
||||
message AddTargetPortalRequest {
|
||||
// iSCSI Target Portal to register in the initiator
|
||||
TargetPortal target_portal = 1;
|
||||
}
|
||||
|
||||
message AddTargetPortalResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiscoverTargetPortalRequest {
|
||||
// iSCSI Target Portal on which to initiate discovery
|
||||
TargetPortal target_portal = 1;
|
||||
}
|
||||
|
||||
message DiscoverTargetPortalResponse {
|
||||
// List of discovered IQN addresses
|
||||
// follows IQN format: iqn.yyyy-mm.naming-authority:unique-name
|
||||
repeated string iqns = 1;
|
||||
}
|
||||
|
||||
message RemoveTargetPortalRequest {
|
||||
// iSCSI Target Portal
|
||||
TargetPortal target_portal = 1;
|
||||
}
|
||||
|
||||
message RemoveTargetPortalResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ListTargetPortalsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ListTargetPortalsResponse {
|
||||
// A list of Target Portals currently registered in the initiator
|
||||
repeated TargetPortal target_portals = 1;
|
||||
}
|
||||
|
||||
enum AuthenticationType {
|
||||
// No authentication is used
|
||||
NONE = 0;
|
||||
|
||||
// One way CHAP authentication. The target authenticates the initiator.
|
||||
ONE_WAY_CHAP = 1;
|
||||
|
||||
// Mutual CHAP authentication. The target and initiator authenticate each
|
||||
// other.
|
||||
MUTUAL_CHAP = 2;
|
||||
}
|
||||
|
||||
message ConnectTargetRequest {
|
||||
// Target portal to which the initiator will connect
|
||||
TargetPortal target_portal = 1;
|
||||
|
||||
// IQN of the iSCSI Target
|
||||
string iqn = 2;
|
||||
|
||||
// Connection authentication type, None by default
|
||||
//
|
||||
// One Way Chap uses the chap_username and chap_secret
|
||||
// fields mentioned below to authenticate the initiator.
|
||||
//
|
||||
// Mutual Chap uses both the user/secret mentioned below
|
||||
// and the Initiator Chap Secret to authenticate the target and initiator.
|
||||
AuthenticationType auth_type = 3;
|
||||
|
||||
// CHAP Username used to authenticate the initiator
|
||||
string chap_username = 4;
|
||||
|
||||
// CHAP password used to authenticate the initiator
|
||||
string chap_secret = 5;
|
||||
}
|
||||
|
||||
message ConnectTargetResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message GetTargetDisksRequest {
|
||||
// Target portal whose disks will be queried
|
||||
TargetPortal target_portal = 1;
|
||||
|
||||
// IQN of the iSCSI Target
|
||||
string iqn = 2;
|
||||
}
|
||||
|
||||
message GetTargetDisksResponse {
|
||||
// List composed of disk ids (numbers) that are associated with the
|
||||
// iSCSI target
|
||||
repeated string diskIDs = 1;
|
||||
}
|
||||
|
||||
message DisconnectTargetRequest {
|
||||
// Target portal from which initiator will disconnect
|
||||
TargetPortal target_portal = 1;
|
||||
|
||||
// IQN of the iSCSI Target
|
||||
string iqn = 2;
|
||||
}
|
||||
|
||||
message DisconnectTargetResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
|
@ -0,0 +1,175 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha2;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/iscsi/v1alpha2";
|
||||
|
||||
service Iscsi {
|
||||
// AddTargetPortal registers an iSCSI target network address for later
|
||||
// discovery.
|
||||
// AddTargetPortal currently does not support selecting different NICs or
|
||||
// a different iSCSI initiator (e.g. a hardware initiator). This means that
|
||||
// Windows will select the initiator NIC and instance on its own.
|
||||
rpc AddTargetPortal(AddTargetPortalRequest)
|
||||
returns (AddTargetPortalResponse) {}
|
||||
|
||||
// DiscoverTargetPortal initiates discovery on an iSCSI target network address
|
||||
// and returns discovered IQNs.
|
||||
rpc DiscoverTargetPortal(DiscoverTargetPortalRequest)
|
||||
returns (DiscoverTargetPortalResponse) {}
|
||||
|
||||
// RemoveTargetPortal removes an iSCSI target network address registration.
|
||||
rpc RemoveTargetPortal(RemoveTargetPortalRequest)
|
||||
returns (RemoveTargetPortalResponse) {}
|
||||
|
||||
// ListTargetPortals lists all currently registered iSCSI target network
|
||||
// addresses.
|
||||
rpc ListTargetPortals(ListTargetPortalsRequest)
|
||||
returns (ListTargetPortalsResponse) {}
|
||||
|
||||
// ConnectTarget connects to an iSCSI Target
|
||||
rpc ConnectTarget(ConnectTargetRequest) returns (ConnectTargetResponse) {}
|
||||
|
||||
// DisconnectTarget disconnects from an iSCSI Target
|
||||
rpc DisconnectTarget(DisconnectTargetRequest)
|
||||
returns (DisconnectTargetResponse) {}
|
||||
|
||||
// GetTargetDisks returns the disk addresses that correspond to an iSCSI
|
||||
// target
|
||||
rpc GetTargetDisks(GetTargetDisksRequest) returns (GetTargetDisksResponse) {}
|
||||
|
||||
// SetMutualChapSecret sets the default CHAP secret that all initiators on
|
||||
// this machine (node) use to authenticate the target on mutual CHAP
|
||||
// authentication.
|
||||
// NOTE: This method affects global node state and should only be used
|
||||
// with consideration to other CSI drivers that run concurrently.
|
||||
rpc SetMutualChapSecret(SetMutualChapSecretRequest)
|
||||
returns (SetMutualChapSecretResponse) {}
|
||||
}
|
||||
|
||||
// TargetPortal is an address and port pair for a specific iSCSI storage
|
||||
// target.
|
||||
message TargetPortal {
|
||||
// iSCSI Target (server) address
|
||||
string target_address = 1;
|
||||
|
||||
// iSCSI Target port (default iSCSI port is 3260)
|
||||
uint32 target_port = 2;
|
||||
}
|
||||
|
||||
message AddTargetPortalRequest {
|
||||
// iSCSI Target Portal to register in the initiator
|
||||
TargetPortal target_portal = 1;
|
||||
}
|
||||
|
||||
message AddTargetPortalResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DiscoverTargetPortalRequest {
|
||||
// iSCSI Target Portal on which to initiate discovery
|
||||
TargetPortal target_portal = 1;
|
||||
}
|
||||
|
||||
message DiscoverTargetPortalResponse {
|
||||
// List of discovered IQN addresses
|
||||
// follows IQN format: iqn.yyyy-mm.naming-authority:unique-name
|
||||
repeated string iqns = 1;
|
||||
}
|
||||
|
||||
message RemoveTargetPortalRequest {
|
||||
// iSCSI Target Portal
|
||||
TargetPortal target_portal = 1;
|
||||
}
|
||||
|
||||
message RemoveTargetPortalResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ListTargetPortalsRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ListTargetPortalsResponse {
|
||||
// A list of Target Portals currently registered in the initiator
|
||||
repeated TargetPortal target_portals = 1;
|
||||
}
|
||||
|
||||
// iSCSI logon authentication type
|
||||
enum AuthenticationType {
|
||||
// No authentication is used
|
||||
NONE = 0;
|
||||
|
||||
// One way CHAP authentication. The target authenticates the initiator.
|
||||
ONE_WAY_CHAP = 1;
|
||||
|
||||
// Mutual CHAP authentication. The target and initiator authenticate each
|
||||
// other.
|
||||
MUTUAL_CHAP = 2;
|
||||
}
|
||||
|
||||
message ConnectTargetRequest {
|
||||
// Target portal to which the initiator will connect
|
||||
TargetPortal target_portal = 1;
|
||||
|
||||
// IQN of the iSCSI Target
|
||||
string iqn = 2;
|
||||
|
||||
// Connection authentication type, None by default
|
||||
//
|
||||
// One Way Chap uses the chap_username and chap_secret
|
||||
// fields mentioned below to authenticate the initiator.
|
||||
//
|
||||
// Mutual Chap uses both the user/secret mentioned below
|
||||
// and the Initiator Chap Secret (See `SetMutualChapSecret`)
|
||||
// to authenticate the target and initiator.
|
||||
AuthenticationType auth_type = 3;
|
||||
|
||||
// CHAP Username used to authenticate the initiator
|
||||
string chap_username = 4;
|
||||
|
||||
// CHAP password used to authenticate the initiator
|
||||
string chap_secret = 5;
|
||||
}
|
||||
|
||||
message ConnectTargetResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message GetTargetDisksRequest {
|
||||
// Target portal whose disks will be queried
|
||||
TargetPortal target_portal = 1;
|
||||
|
||||
// IQN of the iSCSI Target
|
||||
string iqn = 2;
|
||||
}
|
||||
|
||||
message GetTargetDisksResponse {
|
||||
// List composed of disk ids (numbers) that are associated with the
|
||||
// iSCSI target
|
||||
repeated string diskIDs = 1;
|
||||
}
|
||||
|
||||
message DisconnectTargetRequest {
|
||||
// Target portal from which initiator will disconnect
|
||||
TargetPortal target_portal = 1;
|
||||
|
||||
// IQN of the iSCSI Target
|
||||
string iqn = 2;
|
||||
}
|
||||
|
||||
message DisconnectTargetResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message SetMutualChapSecretRequest {
|
||||
// the default CHAP secret that all initiators on this machine (node) use to
|
||||
// authenticate the target on mutual CHAP authentication.
|
||||
// Must be at least 12 bytes long for non-IPsec connections, at least one
|
||||
// byte long for IPsec connections, and at most 16 bytes long.
|
||||
string MutualChapSecret = 1;
|
||||
}
|
||||
|
||||
message SetMutualChapSecretResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/smb/v1";
|
||||
|
||||
service Smb {
|
||||
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
|
||||
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
|
||||
|
||||
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
|
||||
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
|
||||
}
|
||||
|
||||
|
||||
message NewSmbGlobalMappingRequest {
|
||||
// A remote SMB share to mount
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
|
||||
// Optional local path to mount the SMB share on
|
||||
string local_path = 2;
|
||||
|
||||
// Username credential associated with the share
|
||||
string username = 3;
|
||||
|
||||
// Password credential associated with the share
|
||||
string password = 4;
|
||||
}
|
||||
|
||||
message NewSmbGlobalMappingResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
|
||||
message RemoveSmbGlobalMappingRequest {
|
||||
// A remote SMB share mapping to remove
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
}
|
||||
|
||||
message RemoveSmbGlobalMappingResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha1;
|
||||
|
||||
service Smb {
|
||||
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
|
||||
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
|
||||
|
||||
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
|
||||
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
|
||||
}
|
||||
|
||||
|
||||
message NewSmbGlobalMappingRequest {
|
||||
// A remote SMB share to mount
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
// Optional local path to mount the SMB share on
|
||||
string local_path = 2;
|
||||
|
||||
// Username credential associated with the share
|
||||
string username = 3;
|
||||
|
||||
// Password credential associated with the share
|
||||
string password = 4;
|
||||
}
|
||||
|
||||
message NewSmbGlobalMappingResponse {
|
||||
// Windows error code
|
||||
// Success is represented as 0
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
|
||||
message RemoveSmbGlobalMappingRequest {
|
||||
// A remote SMB share mapping to remove
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
}
|
||||
|
||||
message RemoveSmbGlobalMappingResponse {
|
||||
// Windows error code
|
||||
// Success is represented as 0
|
||||
string error = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta1;
|
||||
|
||||
service Smb {
|
||||
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
|
||||
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
|
||||
|
||||
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
|
||||
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
|
||||
}
|
||||
|
||||
|
||||
message NewSmbGlobalMappingRequest {
|
||||
// A remote SMB share to mount
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
// Optional local path to mount the SMB share on
|
||||
string local_path = 2;
|
||||
|
||||
// Username credential associated with the share
|
||||
string username = 3;
|
||||
|
||||
// Password credential associated with the share
|
||||
string password = 4;
|
||||
}
|
||||
|
||||
message NewSmbGlobalMappingResponse {
|
||||
// Windows error code
|
||||
// Success is represented as 0
|
||||
string error = 1;
|
||||
}
|
||||
|
||||
|
||||
message RemoveSmbGlobalMappingRequest {
|
||||
// A remote SMB share mapping to remove
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
}
|
||||
|
||||
message RemoveSmbGlobalMappingResponse {
|
||||
// Windows error code
|
||||
// Success is represented as 0
|
||||
string error = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,58 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta2;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/smb/v1beta2";
|
||||
|
||||
service Smb {
|
||||
// NewSmbGlobalMapping creates an SMB mapping on the SMB client to an SMB share.
|
||||
rpc NewSmbGlobalMapping(NewSmbGlobalMappingRequest) returns (NewSmbGlobalMappingResponse) {}
|
||||
|
||||
// RemoveSmbGlobalMapping removes the SMB mapping to an SMB share.
|
||||
rpc RemoveSmbGlobalMapping(RemoveSmbGlobalMappingRequest) returns (RemoveSmbGlobalMappingResponse) {}
|
||||
}
|
||||
|
||||
|
||||
message NewSmbGlobalMappingRequest {
|
||||
// A remote SMB share to mount
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB remote path specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
|
||||
// Optional local path to mount the SMB share on
|
||||
string local_path = 2;
|
||||
|
||||
// Username credential associated with the share
|
||||
string username = 3;
|
||||
|
||||
// Password credential associated with the share
|
||||
string password = 4;
|
||||
}
|
||||
|
||||
message NewSmbGlobalMappingResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
|
||||
message RemoveSmbGlobalMappingRequest {
|
||||
// A remote SMB share mapping to remove
|
||||
// All unicode characters allowed in SMB server name specifications are
|
||||
// permitted except for restrictions below
|
||||
//
|
||||
// Restrictions:
|
||||
// SMB share specified in the format: \\server-name\sharename, \\server.fqdn\sharename or \\a.b.c.d\sharename
|
||||
// If not an IP address, share name has to be a valid DNS name.
|
||||
// UNC specifications to local paths or prefix: \\?\ is not allowed.
|
||||
// Characters: + [ ] " / : ; | < > , ? * = $ are not allowed.
|
||||
string remote_path = 1;
|
||||
}
|
||||
|
||||
message RemoveSmbGlobalMappingResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
|
@ -0,0 +1,93 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/system/v1alpha1";
|
||||
|
||||
service System {
|
||||
// GetBIOSSerialNumber returns the device's serial number
|
||||
rpc GetBIOSSerialNumber(GetBIOSSerialNumberRequest)
|
||||
returns (GetBIOSSerialNumberResponse) {}
|
||||
|
||||
// StartService starts a Windows service
|
||||
// NOTE: This method affects global node state and should only be used
|
||||
// with consideration to other CSI drivers that run concurrently.
|
||||
rpc StartService(StartServiceRequest) returns (StartServiceResponse) {}
|
||||
|
||||
// StopService stops a Windows service
|
||||
// NOTE: This method affects global node state and should only be used
|
||||
// with consideration to other CSI drivers that run concurrently.
|
||||
rpc StopService(StopServiceRequest) returns (StopServiceResponse) {}
|
||||
|
||||
// GetService queries a Windows service state
|
||||
rpc GetService(GetServiceRequest) returns (GetServiceResponse) {}
|
||||
}
|
||||
|
||||
message GetBIOSSerialNumberRequest {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message GetBIOSSerialNumberResponse {
|
||||
// Serial number
|
||||
string serial_number = 1;
|
||||
}
|
||||
|
||||
message StartServiceRequest {
|
||||
// Service name (as listed in System\CCS\Services keys)
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
message StartServiceResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message StopServiceRequest {
|
||||
// Service name (as listed in System\CCS\Services keys)
|
||||
string name = 1;
|
||||
|
||||
// Forces stopping of services that have dependent services
|
||||
bool force = 2;
|
||||
}
|
||||
|
||||
message StopServiceResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winsvc/ns-winsvc-service_status#members
|
||||
enum ServiceStatus {
|
||||
UNKNOWN = 0;
|
||||
STOPPED = 1;
|
||||
START_PENDING = 2;
|
||||
STOP_PENDING = 3;
|
||||
RUNNING = 4;
|
||||
CONTINUE_PENDING = 5;
|
||||
PAUSE_PENDING = 6;
|
||||
PAUSED = 7;
|
||||
}
|
||||
|
||||
// https://docs.microsoft.com/en-us/windows/win32/api/winsvc/nf-winsvc-changeserviceconfiga
|
||||
enum StartType {
|
||||
BOOT = 0;
|
||||
SYSTEM = 1;
|
||||
AUTOMATIC = 2;
|
||||
MANUAL = 3;
|
||||
DISABLED = 4;
|
||||
}
|
||||
|
||||
message GetServiceRequest {
|
||||
// Service name (as listed in System\CCS\Services keys)
|
||||
string name = 1;
|
||||
}
|
||||
|
||||
message GetServiceResponse {
|
||||
// Service display name
|
||||
string display_name = 1;
|
||||
|
||||
// Service start type.
|
||||
// Used to control whether a service will start on boot, and if so on which
|
||||
// boot phase.
|
||||
StartType start_type = 2;
|
||||
|
||||
// Service status, e.g. stopped, running, paused
|
||||
ServiceStatus status = 3;
|
||||
}
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1";
|
||||
|
||||
service Volume {
|
||||
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for all volumes from a
|
||||
// given disk number and partition number (optional)
|
||||
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
|
||||
|
||||
// MountVolume mounts the volume at the requested global staging path.
|
||||
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
|
||||
|
||||
// UnmountVolume flushes data cache to disk and removes the global staging path.
|
||||
rpc UnmountVolume(UnmountVolumeRequest) returns (UnmountVolumeResponse) {}
|
||||
|
||||
// IsVolumeFormatted checks if a volume is formatted.
|
||||
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
|
||||
|
||||
// FormatVolume formats a volume with NTFS.
|
||||
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
|
||||
|
||||
// ResizeVolume performs resizing of the partition and file system for a block based volume.
|
||||
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
|
||||
|
||||
// GetVolumeStats gathers total bytes and used bytes for a volume.
|
||||
rpc GetVolumeStats(GetVolumeStatsRequest) returns (GetVolumeStatsResponse) {}
|
||||
|
||||
// GetDiskNumberFromVolumeID gets the disk number of the disk where the volume is located.
|
||||
rpc GetDiskNumberFromVolumeID(GetDiskNumberFromVolumeIDRequest) returns (GetDiskNumberFromVolumeIDResponse ) {}
|
||||
|
||||
// GetVolumeIDFromTargetPath gets the volume id for a given target path.
|
||||
rpc GetVolumeIDFromTargetPath(GetVolumeIDFromTargetPathRequest) returns (GetVolumeIDFromTargetPathResponse) {}
|
||||
|
||||
// WriteVolumeCache writes the volume cache to disk.
|
||||
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskRequest {
|
||||
// Disk device number of the disk to query for volumes.
|
||||
uint32 disk_number = 1;
|
||||
// The partition number (optional), by default it uses the first partition of the disk.
|
||||
uint32 partition_number = 2;
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskResponse {
|
||||
// Volume device IDs of volumes on the specified disk.
|
||||
repeated string volume_ids = 1;
|
||||
}
|
||||
|
||||
message MountVolumeRequest {
|
||||
// Volume device ID of the volume to mount.
|
||||
string volume_id = 1;
|
||||
// Path in the host's file system where the volume needs to be mounted.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message MountVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message UnmountVolumeRequest {
|
||||
// Volume device ID of the volume to dismount.
|
||||
string volume_id = 1;
|
||||
// Path where the volume has been mounted.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message UnmountVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message IsVolumeFormattedRequest {
|
||||
// Volume device ID of the volume to check.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message IsVolumeFormattedResponse {
|
||||
// Is the volume formatted with NTFS.
|
||||
bool formatted = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeRequest {
|
||||
// Volume device ID of the volume to format.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message ResizeVolumeRequest {
|
||||
// Volume device ID of the volume to resize.
|
||||
string volume_id = 1;
|
||||
// New size in bytes of the volume.
|
||||
int64 size_bytes = 2;
|
||||
}
|
||||
|
||||
message ResizeVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message GetVolumeStatsRequest{
|
||||
// Volume device Id of the volume to get the stats for.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetVolumeStatsResponse{
|
||||
// Total bytes
|
||||
int64 total_bytes = 1;
|
||||
// Used bytes
|
||||
int64 used_bytes = 2;
|
||||
}
|
||||
|
||||
message GetDiskNumberFromVolumeIDRequest {
|
||||
// Volume device ID of the volume to get the disk number for.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetDiskNumberFromVolumeIDResponse {
|
||||
// Corresponding disk number.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetVolumeIDFromTargetPathRequest {
|
||||
// The target path.
|
||||
string target_path = 1;
|
||||
}
|
||||
|
||||
message GetVolumeIDFromTargetPathResponse {
|
||||
// The volume device ID.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheRequest {
|
||||
// Volume device ID of the volume to flush the cache.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1alpha1;
|
||||
|
||||
service Volume {
|
||||
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for
|
||||
// all volumes on a Disk device
|
||||
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
|
||||
// MountVolume mounts the volume at the requested global staging path
|
||||
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
|
||||
// DismountVolume gracefully dismounts a volume
|
||||
rpc DismountVolume(DismountVolumeRequest) returns (DismountVolumeResponse) {}
|
||||
// IsVolumeFormatted checks if a volume is formatted with NTFS
|
||||
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
|
||||
// FormatVolume formats a volume with the provided file system
|
||||
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
|
||||
// ResizeVolume performs resizing of the partition and file system for a block based volume
|
||||
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
|
||||
}
|
||||
message ListVolumesOnDiskRequest {
|
||||
// Disk device ID of the disk to query for volumes
|
||||
string disk_id = 1;
|
||||
}
|
||||
message ListVolumesOnDiskResponse {
|
||||
// Volume device IDs of volumes on the specified disk
|
||||
repeated string volume_ids = 1;
|
||||
}
|
||||
message MountVolumeRequest {
|
||||
// Volume device ID of the volume to mount
|
||||
string volume_id = 1;
|
||||
// Path in the host's file system where the volume needs to be mounted
|
||||
string path = 2;
|
||||
}
|
||||
message MountVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
message DismountVolumeRequest {
|
||||
// Volume device ID of the volume to dismount
|
||||
string volume_id = 1;
|
||||
// Path where the volume has been mounted.
|
||||
string path = 2;
|
||||
}
|
||||
message DismountVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
message IsVolumeFormattedRequest {
|
||||
// Volume device ID of the volume to check
|
||||
string volume_id = 1;
|
||||
}
|
||||
message IsVolumeFormattedResponse {
|
||||
// Is the volume formatted with NTFS
|
||||
bool formatted = 1;
|
||||
}
|
||||
message FormatVolumeRequest {
|
||||
// Volume device ID of the volume to format
|
||||
string volume_id = 1;
|
||||
}
|
||||
message FormatVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
message ResizeVolumeRequest {
|
||||
// Volume device ID of the volume to resize
|
||||
string volume_id = 1;
|
||||
// New size of the volume
|
||||
int64 size = 2;
|
||||
}
|
||||
message ResizeVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
|
@ -0,0 +1,121 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1beta1";
|
||||
|
||||
service Volume {
|
||||
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for
|
||||
// all volumes on a Disk device
|
||||
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
|
||||
// MountVolume mounts the volume at the requested global staging path
|
||||
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
|
||||
// DismountVolume gracefully dismounts a volume
|
||||
rpc DismountVolume(DismountVolumeRequest) returns (DismountVolumeResponse) {}
|
||||
// IsVolumeFormatted checks if a volume is formatted with NTFS
|
||||
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
|
||||
// FormatVolume formats a volume with the provided file system
|
||||
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
|
||||
// ResizeVolume performs resizing of the partition and file system for a block based volume
|
||||
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
|
||||
// VolumeStats gathers DiskSize, VolumeSize and VolumeUsedSize for a volume
|
||||
rpc VolumeStats(VolumeStatsRequest) returns (VolumeStatsResponse) {}
|
||||
// GetVolumeDiskNumber gets the disk number of the disk where the volume is located
|
||||
rpc GetVolumeDiskNumber(VolumeDiskNumberRequest) returns (VolumeDiskNumberResponse) {}
|
||||
// GetVolumeIDFromMount gets the volume id for a given mount
|
||||
rpc GetVolumeIDFromMount(VolumeIDFromMountRequest) returns (VolumeIDFromMountResponse) {}
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskRequest {
|
||||
// Disk device ID of the disk to query for volumes
|
||||
string disk_id = 1;
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskResponse {
|
||||
// Volume device IDs of volumes on the specified disk
|
||||
repeated string volume_ids = 1;
|
||||
}
|
||||
|
||||
message MountVolumeRequest {
|
||||
// Volume device ID of the volume to mount
|
||||
string volume_id = 1;
|
||||
// Path in the host's file system where the volume needs to be mounted
|
||||
string path = 2;
|
||||
}
|
||||
|
||||
message MountVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DismountVolumeRequest {
|
||||
// Volume device ID of the volume to dismount
|
||||
string volume_id = 1;
|
||||
// Path where the volume has been mounted.
|
||||
string path = 2;
|
||||
}
|
||||
|
||||
message DismountVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message IsVolumeFormattedRequest {
|
||||
// Volume device ID of the volume to check
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message IsVolumeFormattedResponse {
|
||||
// Is the volume formatted with NTFS
|
||||
bool formatted = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeRequest {
|
||||
// Volume device ID of the volume to format
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ResizeVolumeRequest {
|
||||
// Volume device ID of the volume to resize
|
||||
string volume_id = 1;
|
||||
// New size of the volume
|
||||
int64 size = 2;
|
||||
}
|
||||
|
||||
message ResizeVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message VolumeStatsRequest{
|
||||
// Volume device Id of the volume to get the stats for
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message VolumeStatsResponse{
|
||||
// Capacity of the volume
|
||||
int64 volumeSize = 1;
|
||||
// Used bytes
|
||||
int64 volumeUsedSize = 2;
|
||||
}
|
||||
|
||||
message VolumeDiskNumberRequest{
|
||||
// Volume device Id of the volume to get the disk number for
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message VolumeDiskNumberResponse{
|
||||
// Corresponding disk number
|
||||
int64 diskNumber = 1;
|
||||
}
|
||||
|
||||
message VolumeIDFromMountRequest {
|
||||
// Mount
|
||||
string mount = 1;
|
||||
}
|
||||
|
||||
message VolumeIDFromMountResponse {
|
||||
// Volume device ID corresponding to the mount
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
|
@ -0,0 +1,132 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta2;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1beta2";
|
||||
|
||||
service Volume {
|
||||
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for
|
||||
// all volumes on a Disk device
|
||||
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
|
||||
// MountVolume mounts the volume at the requested global staging path
|
||||
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
|
||||
// DismountVolume gracefully dismounts a volume
|
||||
rpc DismountVolume(DismountVolumeRequest) returns (DismountVolumeResponse) {}
|
||||
// IsVolumeFormatted checks if a volume is formatted with NTFS
|
||||
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
|
||||
// FormatVolume formats a volume with the provided file system
|
||||
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
|
||||
// ResizeVolume performs resizing of the partition and file system for a block based volume
|
||||
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
|
||||
// VolumeStats gathers DiskSize, VolumeSize and VolumeUsedSize for a volume
|
||||
rpc VolumeStats(VolumeStatsRequest) returns (VolumeStatsResponse) {}
|
||||
// GetVolumeDiskNumber gets the disk number of the disk where the volume is located
|
||||
rpc GetVolumeDiskNumber(VolumeDiskNumberRequest) returns (VolumeDiskNumberResponse) {}
|
||||
// GetVolumeIDFromMount gets the volume id for a given mount
|
||||
rpc GetVolumeIDFromMount(VolumeIDFromMountRequest) returns (VolumeIDFromMountResponse) {}
|
||||
// WriteVolumeCache writes the volume cache to disk
|
||||
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskRequest {
|
||||
// Disk device ID of the disk to query for volumes
|
||||
string disk_id = 1;
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskResponse {
|
||||
// Volume device IDs of volumes on the specified disk
|
||||
repeated string volume_ids = 1;
|
||||
}
|
||||
|
||||
message MountVolumeRequest {
|
||||
// Volume device ID of the volume to mount
|
||||
string volume_id = 1;
|
||||
// Path in the host's file system where the volume needs to be mounted
|
||||
string path = 2;
|
||||
}
|
||||
|
||||
message MountVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message DismountVolumeRequest {
|
||||
// Volume device ID of the volume to dismount
|
||||
string volume_id = 1;
|
||||
// Path where the volume has been mounted.
|
||||
string path = 2;
|
||||
}
|
||||
|
||||
message DismountVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message IsVolumeFormattedRequest {
|
||||
// Volume device ID of the volume to check
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message IsVolumeFormattedResponse {
|
||||
// Is the volume formatted with NTFS
|
||||
bool formatted = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeRequest {
|
||||
// Volume device ID of the volume to format
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message ResizeVolumeRequest {
|
||||
// Volume device ID of the volume to resize
|
||||
string volume_id = 1;
|
||||
// New size of the volume
|
||||
int64 size = 2;
|
||||
}
|
||||
|
||||
message ResizeVolumeResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
||||
message VolumeStatsRequest{
|
||||
// Volume device Id of the volume to get the stats for
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message VolumeStatsResponse{
|
||||
// Capacity of the volume
|
||||
int64 volumeSize = 1;
|
||||
// Used bytes
|
||||
int64 volumeUsedSize = 2;
|
||||
}
|
||||
|
||||
message VolumeDiskNumberRequest{
|
||||
// Volume device Id of the volume to get the disk number for
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message VolumeDiskNumberResponse{
|
||||
// Corresponding disk number
|
||||
int64 diskNumber = 1;
|
||||
}
|
||||
|
||||
message VolumeIDFromMountRequest {
|
||||
// Mount
|
||||
string mount = 1;
|
||||
}
|
||||
|
||||
message VolumeIDFromMountResponse {
|
||||
// Volume device ID corresponding to the mount
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheRequest {
|
||||
// Volume device ID of the volume to flush the cache
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheResponse {
|
||||
// Intentionally empty
|
||||
}
|
||||
|
|
@ -0,0 +1,143 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v1beta3;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v1beta3";
|
||||
|
||||
service Volume {
|
||||
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for all volumes from a
|
||||
// given disk number and partition number (optional)
|
||||
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
|
||||
|
||||
// MountVolume mounts the volume at the requested global staging path.
|
||||
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
|
||||
|
||||
// UnmountVolume flushes data cache to disk and removes the global staging path.
|
||||
rpc UnmountVolume(UnmountVolumeRequest) returns (UnmountVolumeResponse) {}
|
||||
|
||||
// IsVolumeFormatted checks if a volume is formatted.
|
||||
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
|
||||
|
||||
// FormatVolume formats a volume with NTFS.
|
||||
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
|
||||
|
||||
// ResizeVolume performs resizing of the partition and file system for a block based volume.
|
||||
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
|
||||
|
||||
// GetVolumeStats gathers total bytes and used bytes for a volume.
|
||||
rpc GetVolumeStats(GetVolumeStatsRequest) returns (GetVolumeStatsResponse) {}
|
||||
|
||||
// GetDiskNumberFromVolumeID gets the disk number of the disk where the volume is located.
|
||||
rpc GetDiskNumberFromVolumeID(GetDiskNumberFromVolumeIDRequest) returns (GetDiskNumberFromVolumeIDResponse ) {}
|
||||
|
||||
// GetVolumeIDFromTargetPath gets the volume id for a given target path.
|
||||
rpc GetVolumeIDFromTargetPath(GetVolumeIDFromTargetPathRequest) returns (GetVolumeIDFromTargetPathResponse) {}
|
||||
|
||||
// WriteVolumeCache writes the volume cache to disk.
|
||||
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskRequest {
|
||||
// Disk device number of the disk to query for volumes.
|
||||
uint32 disk_number = 1;
|
||||
// The partition number (optional), by default it uses the first partition of the disk.
|
||||
uint32 partition_number = 2;
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskResponse {
|
||||
// Volume device IDs of volumes on the specified disk.
|
||||
repeated string volume_ids = 1;
|
||||
}
|
||||
|
||||
message MountVolumeRequest {
|
||||
// Volume device ID of the volume to mount.
|
||||
string volume_id = 1;
|
||||
// Path in the host's file system where the volume needs to be mounted.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message MountVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message UnmountVolumeRequest {
|
||||
// Volume device ID of the volume to dismount.
|
||||
string volume_id = 1;
|
||||
// Path where the volume has been mounted.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message UnmountVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message IsVolumeFormattedRequest {
|
||||
// Volume device ID of the volume to check.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message IsVolumeFormattedResponse {
|
||||
// Is the volume formatted with NTFS.
|
||||
bool formatted = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeRequest {
|
||||
// Volume device ID of the volume to format.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message ResizeVolumeRequest {
|
||||
// Volume device ID of the volume to resize.
|
||||
string volume_id = 1;
|
||||
// New size in bytes of the volume.
|
||||
int64 size_bytes = 2;
|
||||
}
|
||||
|
||||
message ResizeVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message GetVolumeStatsRequest{
|
||||
// Volume device Id of the volume to get the stats for.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetVolumeStatsResponse{
|
||||
// Total bytes
|
||||
int64 total_bytes = 1;
|
||||
// Used bytes
|
||||
int64 used_bytes = 2;
|
||||
}
|
||||
|
||||
message GetDiskNumberFromVolumeIDRequest {
|
||||
// Volume device ID of the volume to get the disk number for.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetDiskNumberFromVolumeIDResponse {
|
||||
// Corresponding disk number.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetVolumeIDFromTargetPathRequest {
|
||||
// The target path.
|
||||
string target_path = 1;
|
||||
}
|
||||
|
||||
message GetVolumeIDFromTargetPathResponse {
|
||||
// The volume device ID.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheRequest {
|
||||
// Volume device ID of the volume to flush the cache.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
|
@ -0,0 +1,158 @@
|
|||
syntax = "proto3";
|
||||
|
||||
package v2alpha1;
|
||||
|
||||
option go_package = "github.com/kubernetes-csi/csi-proxy/client/api/volume/v2alpha1";
|
||||
|
||||
service Volume {
|
||||
// ListVolumesOnDisk returns the volume IDs (in \\.\Volume{GUID} format) for all volumes from a
|
||||
// given disk number and partition number (optional)
|
||||
rpc ListVolumesOnDisk(ListVolumesOnDiskRequest) returns (ListVolumesOnDiskResponse) {}
|
||||
|
||||
// MountVolume mounts the volume at the requested global staging path.
|
||||
rpc MountVolume(MountVolumeRequest) returns (MountVolumeResponse) {}
|
||||
|
||||
// UnmountVolume flushes data cache to disk and removes the global staging path.
|
||||
rpc UnmountVolume(UnmountVolumeRequest) returns (UnmountVolumeResponse) {}
|
||||
|
||||
// IsVolumeFormatted checks if a volume is formatted.
|
||||
rpc IsVolumeFormatted(IsVolumeFormattedRequest) returns (IsVolumeFormattedResponse) {}
|
||||
|
||||
// FormatVolume formats a volume with NTFS.
|
||||
rpc FormatVolume(FormatVolumeRequest) returns (FormatVolumeResponse) {}
|
||||
|
||||
// ResizeVolume performs resizing of the partition and file system for a block based volume.
|
||||
rpc ResizeVolume(ResizeVolumeRequest) returns (ResizeVolumeResponse) {}
|
||||
|
||||
// GetVolumeStats gathers total bytes and used bytes for a volume.
|
||||
rpc GetVolumeStats(GetVolumeStatsRequest) returns (GetVolumeStatsResponse) {}
|
||||
|
||||
// GetDiskNumberFromVolumeID gets the disk number of the disk where the volume is located.
|
||||
rpc GetDiskNumberFromVolumeID(GetDiskNumberFromVolumeIDRequest) returns (GetDiskNumberFromVolumeIDResponse ) {}
|
||||
|
||||
// GetVolumeIDFromTargetPath gets the volume id for a given target path.
|
||||
rpc GetVolumeIDFromTargetPath(GetVolumeIDFromTargetPathRequest) returns (GetVolumeIDFromTargetPathResponse) {}
|
||||
|
||||
// GetClosestVolumeIDFromTargetPath gets the closest volume id for a given target path
|
||||
// by following symlinks and moving up in the filesystem; if moving up in the filesystem
|
||||
// reaches a drive letter, the volume corresponding to that drive letter is returned instead.
|
||||
rpc GetClosestVolumeIDFromTargetPath(GetClosestVolumeIDFromTargetPathRequest) returns (GetClosestVolumeIDFromTargetPathResponse) {}
|
||||
|
||||
// WriteVolumeCache writes the volume cache to disk.
|
||||
rpc WriteVolumeCache(WriteVolumeCacheRequest) returns (WriteVolumeCacheResponse) {}
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskRequest {
|
||||
// Disk device number of the disk to query for volumes.
|
||||
uint32 disk_number = 1;
|
||||
// The partition number (optional), by default it uses the first partition of the disk.
|
||||
uint32 partition_number = 2;
|
||||
}
|
||||
|
||||
message ListVolumesOnDiskResponse {
|
||||
// Volume device IDs of volumes on the specified disk.
|
||||
repeated string volume_ids = 1;
|
||||
}
|
||||
|
||||
message MountVolumeRequest {
|
||||
// Volume device ID of the volume to mount.
|
||||
string volume_id = 1;
|
||||
// Path in the host's file system where the volume needs to be mounted.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message MountVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message UnmountVolumeRequest {
|
||||
// Volume device ID of the volume to dismount.
|
||||
string volume_id = 1;
|
||||
// Path where the volume has been mounted.
|
||||
string target_path = 2;
|
||||
}
|
||||
|
||||
message UnmountVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message IsVolumeFormattedRequest {
|
||||
// Volume device ID of the volume to check.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message IsVolumeFormattedResponse {
|
||||
// Is the volume formatted with NTFS.
|
||||
bool formatted = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeRequest {
|
||||
// Volume device ID of the volume to format.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message FormatVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message ResizeVolumeRequest {
|
||||
// Volume device ID of the volume to resize.
|
||||
string volume_id = 1;
|
||||
// New size in bytes of the volume.
|
||||
int64 size_bytes = 2;
|
||||
}
|
||||
|
||||
message ResizeVolumeResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
||||
message GetVolumeStatsRequest{
|
||||
// Volume device Id of the volume to get the stats for.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetVolumeStatsResponse{
|
||||
// Total bytes
|
||||
int64 total_bytes = 1;
|
||||
// Used bytes
|
||||
int64 used_bytes = 2;
|
||||
}
|
||||
|
||||
message GetDiskNumberFromVolumeIDRequest {
|
||||
// Volume device ID of the volume to get the disk number for.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetDiskNumberFromVolumeIDResponse {
|
||||
// Corresponding disk number.
|
||||
uint32 disk_number = 1;
|
||||
}
|
||||
|
||||
message GetVolumeIDFromTargetPathRequest {
|
||||
// The target path.
|
||||
string target_path = 1;
|
||||
}
|
||||
|
||||
message GetVolumeIDFromTargetPathResponse {
|
||||
// The volume device ID.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message GetClosestVolumeIDFromTargetPathRequest {
|
||||
// The target path.
|
||||
string target_path = 1;
|
||||
}
|
||||
|
||||
message GetClosestVolumeIDFromTargetPathResponse {
|
||||
// The volume device ID.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheRequest {
|
||||
// Volume device ID of the volume to flush the cache.
|
||||
string volume_id = 1;
|
||||
}
|
||||
|
||||
message WriteVolumeCacheResponse {
|
||||
// Intentionally empty.
|
||||
}
|
||||
|
|
@ -2,4 +2,4 @@
|
|||
|
||||
# https://engineering.docker.com/2019/07/road-to-containing-iscsi/
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin" iscsiadm "${@:1}"
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" iscsiadm "${@:1}"
|
||||
|
|
|
|||
docker/mount
|
|
@ -20,16 +20,18 @@ container_supported_filesystems=(
|
|||
while getopts "t:" opt; do
|
||||
case "$opt" in
|
||||
t)
|
||||
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
|
||||
if [[ "x${USE_HOST_MOUNT_TOOLS}" == "x" ]]; then
|
||||
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}"
|
||||
if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]]; then
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" mount "${@:1}"
|
||||
else
|
||||
/usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" mount "${@:1}"
|
||||
/usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" mount "${@:1}"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/sbin:/usr/bin" multipath "${@:1}"
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" multipath "${@:1}"
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" oneclient "${@:1}"
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" oneclient "${@:1}"
|
||||
|
|
|
|||
|
|
@ -20,16 +20,18 @@ container_supported_filesystems=(
|
|||
while getopts "t:" opt; do
|
||||
case "$opt" in
|
||||
t)
|
||||
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
|
||||
if [[ "x${USE_HOST_MOUNT_TOOLS}" == "x" ]]; then
|
||||
[[ "${OPTARG,,}" == "zfs" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "lustre" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
[[ "${OPTARG,,}" == "onedata" ]] && USE_HOST_MOUNT_TOOLS=1
|
||||
#(printf '%s\0' "${container_supported_filesystems[@]}" | grep -Fqxz -- "${OPTARG}") || USE_HOST_MOUNT_TOOLS=1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]];then
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" umount "${@:1}"
|
||||
if [[ ${USE_HOST_MOUNT_TOOLS} -eq 1 ]]; then
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" umount "${@:1}"
|
||||
else
|
||||
/usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" umount "${@:1}"
|
||||
/usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" umount "${@:1}"
|
||||
fi
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" zfs "${@:1}"
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" zfs "${@:1}"
|
||||
|
|
|
|||
|
|
@ -1,3 +1,3 @@
|
|||
#!/bin/bash
|
||||
|
||||
chroot /host /usr/bin/env -i PATH="/sbin:/bin:/usr/bin:/usr/sbin" zpool "${@:1}"
|
||||
chroot /host /usr/bin/env -i PATH="/usr/sbin:/usr/bin:/sbin:/bin" zpool "${@:1}"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,138 @@
|
|||
# Storage Class Parameters
|
||||
|
||||
Some drivers support different settings for volumes. These can be configured via the driver configuration and/or storage
|
||||
classes.
|
||||
|
||||
## `synology-iscsi`
|
||||
The `synology-iscsi` driver supports several storage class parameters. Note however that not all parameters/values are
|
||||
supported for all backing file systems and LUN types. The following options are available:
|
||||
|
||||
### Configure Storage Classes
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-iscsi
|
||||
parameters:
|
||||
fsType: ext4
|
||||
# The following options affect the LUN representing the volume. These options are passed directly to the Synology API.
|
||||
# The following options are known.
|
||||
lunTemplate: |
|
||||
type: BLUN # Btrfs thin provisioning
|
||||
type: BLUN_THICK # Btrfs thick provisioning
|
||||
type: THIN # Ext4 thin provisioning
|
||||
type: ADV # Ext4 thin provisioning with legacy advanced feature set
|
||||
type: FILE # Ext4 thick provisioning
|
||||
description: Some Description
|
||||
|
||||
# Only for thick provisioned volumes. Known values:
|
||||
# 0: Buffered Writes
|
||||
# 3: Direct Write
|
||||
direct_io_pattern: 0
|
||||
|
||||
# Device Attributes. See below for more info
|
||||
dev_attribs:
|
||||
- dev_attrib: emulate_tpws
|
||||
enable: 1
|
||||
- ...
|
||||
|
||||
# The following options affect the iSCSI target. These options will be passed directly to the Synology API.
|
||||
# The following options are known.
|
||||
targetTemplate: |
|
||||
has_header_checksum: false
|
||||
has_data_checksum: false
|
||||
|
||||
# Note that this option requires a compatible filesystem. Use 0 for unlimited sessions.
|
||||
max_sessions: 0
|
||||
multi_sessions: true
|
||||
max_recv_seg_bytes: 262144
|
||||
max_send_seg_bytes: 262144
|
||||
|
||||
# Use this to disable authentication. To configure authentication see below
|
||||
auth_type: 0
|
||||
```
|
||||
|
||||
#### About LUN Types
|
||||
The availability of the different LUN types depends on the filesystem used on your Synology volume. For Btrfs volumes
you can use `BLUN` and `BLUN_THICK` LUNs. For Ext4 volumes you can use `THIN`, `ADV` or `FILE` LUNs. These
correspond to the options available in the UI.
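
For example, a minimal `StorageClass` targeting a Btrfs-backed volume could look like the sketch below (the class name
and `provisioner` value are placeholders and depend on your deployment):

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: synology-iscsi-btrfs                     # illustrative name
provisioner: org.democratic-csi.synology-iscsi   # placeholder; use your deployment's CSI driver name
parameters:
  # filesystem created on the LUN by the node, independent of the Synology backing filesystem
  fsType: ext4
  lunTemplate: |
    # BLUN requires a Btrfs volume; on Ext4 volumes use THIN, ADV or FILE instead
    type: BLUN
reclaimPolicy: Delete
```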
|
||||
|
||||
#### About `dev_attribs`
|
||||
Most of the LUN options are configured via the `dev_attribs` list. This list can be specified both in the `lunTemplate`
|
||||
of the global configuration and in the `lunTemplate` of the `StorageClass`. If both lists are present, they will be merged
(with the `StorageClass` taking precedence); see the sketch after the list below. The following `dev_attribs` are known to work:
|
||||
|
||||
- `emulate_tpws`: Hardware-assisted zeroing
|
||||
- `emulate_caw`: Hardware-assisted locking
|
||||
- `emulate_3pc`: Hardware-assisted data transfer
|
||||
- `emulate_tpu`: Space Reclamation
|
||||
- `emulate_fua_write`: Enable the FUA iSCSI command (DSM 7+)
|
||||
- `emulate_sync_cache`: Enable the Sync Cache iSCSI command (DSM 7+)
|
||||
- `can_snapshot`: Enable snapshots for this volume. Only works for thin provisioned volumes.
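
As a sketch of the merge behaviour (attribute values are illustrative), a `StorageClass` could add or override a couple
of `dev_attribs` on top of whatever the global `lunTemplate` already defines:

```yaml
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: synology-iscsi-thin          # illustrative name
parameters:
  lunTemplate: |
    type: BLUN
    dev_attribs:
      # merged with (and taking precedence over) the driver's global lunTemplate dev_attribs
      - dev_attrib: emulate_tpu      # Space Reclamation
        enable: 1
      - dev_attrib: can_snapshot     # snapshots; thin provisioned LUNs only
        enable: 1
```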
|
||||
|
||||
### Configure Snapshot Classes
|
||||
`synology-iscsi` can also configure different parameters on snapshot classes:
|
||||
|
||||
```yaml
|
||||
apiVersion: snapshot.storage.k8s.io/v1
|
||||
kind: VolumeSnapshotClass
|
||||
metadata:
|
||||
name: synology-iscsi-snapshot
|
||||
parameters:
|
||||
# This inline yaml object will be passed to the Synology API when creating the snapshot.
|
||||
lunSnapshotTemplate: |
|
||||
is_locked: true
|
||||
|
||||
# https://kb.synology.com/en-me/DSM/tutorial/What_is_file_system_consistent_snapshot
|
||||
# Note that app consistent snapshots require a working Synology Storage Console. Otherwise both values will have
|
||||
# equivalent behavior.
|
||||
is_app_consistent: true
|
||||
...
|
||||
```
|
||||
|
||||
Note that Synology devices do not currently support restoring a snapshot onto a different volume. You can
create volumes from snapshots, but you should use the same `StorageClass` as the snapshot's original volume.
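
For instance, restoring a snapshot into a new volume looks roughly like the following (all names are placeholders; note
that `storageClassName` matches the class used by the snapshot's source volume):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: restored-data                # placeholder
spec:
  storageClassName: synology-iscsi   # same class as the snapshot's original volume
  dataSource:
    apiGroup: snapshot.storage.k8s.io
    kind: VolumeSnapshot
    name: my-snapshot                # an existing VolumeSnapshot in the same namespace
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
```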
|
||||
|
||||
### Enabling CHAP Authentication
|
||||
You can enable CHAP Authentication for `StorageClass`es by supplying an appropriate `StorageClass` secret (see the
|
||||
[documentation](https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html) for more details). You
|
||||
can use the same password for all volumes of a `StorageClass` or use different passwords per volume.
|
||||
|
||||
```yaml
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: synology-iscsi-chap
|
||||
parameters:
|
||||
fsType: ext4
|
||||
lunTemplate: |
|
||||
type: BLUN
|
||||
description: iSCSI volumes with CHAP Authentication
|
||||
secrets:
|
||||
# Use this to configure a single set of credentials for all volumes of this StorageClass
|
||||
csi.storage.k8s.io/provisioner-secret-name: chap-secret
|
||||
csi.storage.k8s.io/provisioner-secret-namespace: default
|
||||
# Use substitutions to use different credentials for volumes based on the PVC
|
||||
#csi.storage.k8s.io/provisioner-secret-name: "${pvc.name}-chap-secret"
#csi.storage.k8s.io/provisioner-secret-namespace: "${pvc.namespace}"
|
||||
...
|
||||
---
|
||||
# Use a secret like this to supply CHAP credentials.
|
||||
apiVersion: v1
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: chap-secret
|
||||
stringData:
|
||||
# Client Credentials
|
||||
user: client
|
||||
password: MySecretPassword
|
||||
# Mutual CHAP Credentials. If these are specified mutual CHAP will be enabled.
|
||||
mutualUser: server
|
||||
mutualPassword: MyOtherPassword
|
||||
```
|
||||
|
||||
Note that CHAP authentication will only be enabled if the secret contains both a username and a password. If, for
example, the password is missing, CHAP authentication will not be enabled (but the volume will still be created). You
cannot automatically enable/disable CHAP or change the password after the volume has been created.
|
||||
|
||||
If the secret itself is referenced but not present, the volume will not be created.
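
When the `${pvc.name}`/`${pvc.namespace}` substitutions shown above are used, the per-volume secret is looked up by the
substituted name. A sketch (names are placeholders) for a PVC called `my-data` in namespace `apps`:

```yaml
apiVersion: v1
kind: Secret
metadata:
  # must match the substitution pattern, e.g. "${pvc.name}-chap-secret"
  name: my-data-chap-secret
  namespace: apps
stringData:
  user: client
  password: MySecretPassword
  # optionally enable mutual CHAP:
  #mutualUser: server
  #mutualPassword: MyOtherPassword
```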
|
||||
|
|
@ -43,6 +43,8 @@ zfs:
|
|||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
|
||||
# not supported yet
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
#- "-m u:kube:full_set:allow"
|
||||
|
|
|
|||
|
|
@ -34,9 +34,10 @@ zfs:
|
|||
# "org.freenas:test": "{{ parameters.foo }}"
|
||||
# "org.freenas:test2": "some value"
|
||||
|
||||
datasetProperties:
|
||||
aclmode: restricted
|
||||
casesensitivity: mixed
|
||||
# these are managed automatically via the volume creation process when flagged as an smb volume
|
||||
#datasetProperties:
|
||||
# aclmode: restricted
|
||||
# casesensitivity: mixed
|
||||
|
||||
datasetParentName: tank/k8s/a/vols
|
||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||
|
|
@ -47,8 +48,10 @@ zfs:
|
|||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
datasetPermissionsAcls:
|
||||
- "-m everyone@:full_set:allow"
|
||||
|
||||
# not supported yet in api
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
#- "-m u:kube:full_set:allow"
|
||||
|
||||
smb:
|
||||
|
|
|
|||
|
|
@ -46,7 +46,9 @@ zfs:
|
|||
|
||||
datasetProperties:
|
||||
aclmode: restricted
|
||||
casesensitivity: mixed
|
||||
aclinherit: passthrough
|
||||
acltype: nfsv4
|
||||
casesensitivity: insensitive
|
||||
|
||||
datasetParentName: tank/k8s/a/vols
|
||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||
|
|
@ -54,12 +56,41 @@ zfs:
|
|||
detachedSnapshotsDatasetParentName: tank/k8s/a/snaps
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: nobody
|
||||
datasetPermissionsGroup: nobody
|
||||
datasetPermissionsMode: "0770"
|
||||
|
||||
# as appropriate create a dedicated user for smb connections
|
||||
# and set this
|
||||
datasetPermissionsUser: 65534
|
||||
datasetPermissionsGroup: 65534
|
||||
|
||||
# CORE
|
||||
#datasetPermissionsAclsBinary: setfacl
|
||||
|
||||
# SCALE
|
||||
#datasetPermissionsAclsBinary: nfs4xdr_setfacl
|
||||
|
||||
# if using a user other than guest/nobody comment the 'everyone@' acl
|
||||
# and uncomment the appropriate block below
|
||||
datasetPermissionsAcls:
|
||||
- "-m everyone@:full_set:allow"
|
||||
#- "-m u:kube:full_set:allow"
|
||||
- "-m everyone@:full_set:fd:allow"
|
||||
|
||||
# CORE
|
||||
# in CORE you cannot have multiple entries for the same principle
|
||||
# or said differently, they are declarative so using -m will replace
|
||||
# whatever the current value is for the principle rather than adding a
|
||||
# entry in the acl list
|
||||
#- "-m g:builtin_users:full_set:fd:allow"
|
||||
#- "-m group@:modify_set:fd:allow"
|
||||
#- "-m owner@:full_set:fd:allow"
|
||||
|
||||
# SCALE
|
||||
# https://www.truenas.com/community/threads/get-setfacl-on-scale-with-nfsv4-acls.95231/
|
||||
# -s replaces everything
|
||||
# so we put this in specific order to mimic the defaults of SCALE when using the api
|
||||
#- -s group:builtin_users:full_set:fd:allow
|
||||
#- -a group:builtin_users:modify_set:fd:allow
|
||||
#- -a group@:modify_set:fd:allow
|
||||
#- -a owner@:full_set:fd:allow
|
||||
|
||||
smb:
|
||||
shareHost: server address
|
||||
|
|
@ -77,7 +108,7 @@ smb:
|
|||
shareAllowedHosts: []
|
||||
shareDeniedHosts: []
|
||||
#shareDefaultPermissions: true
|
||||
shareGuestOk: true
|
||||
shareGuestOk: false
|
||||
#shareGuestOnly: true
|
||||
#shareShowHiddenFiles: true
|
||||
shareRecycleBin: true
|
||||
|
|
|
|||
|
|
@ -6,5 +6,5 @@ local-hostpath:
|
|||
shareBasePath: "/var/lib/csi-local-hostpath"
|
||||
controllerBasePath: "/var/lib/csi-local-hostpath"
|
||||
dirPermissionsMode: "0777"
|
||||
dirPermissionsUser: root
|
||||
dirPermissionsGroup: root
|
||||
dirPermissionsUser: 0
|
||||
dirPermissionsGroup: 0
|
||||
|
|
|
|||
|
|
@ -2,6 +2,9 @@
|
|||
|
||||
node:
|
||||
mount:
|
||||
# predominantly used to facilitate testing
|
||||
# mount_flags should generally be defined in storage classes, etc
|
||||
mount_flags: ""
|
||||
# should fsck be executed before mounting the fs
|
||||
checkFilesystem:
|
||||
xfs:
|
||||
|
|
@ -27,3 +30,30 @@ node:
|
|||
# ...
|
||||
btrfs:
|
||||
customOptions: []
|
||||
|
||||
csiProxy:
|
||||
# should be left unset in most situation, will be auto-detected
|
||||
#enabled: true
|
||||
|
||||
# connection attributes can be set to grpc endpoint
|
||||
# ie: hostname:port, or /some/path, or \\.\pipe\foo
|
||||
# connection and version will use internal defaults and should generally be left unset
|
||||
services:
|
||||
filesystem:
|
||||
#version: v1
|
||||
#connection:
|
||||
disk:
|
||||
#version: v1
|
||||
#connection:
|
||||
volume:
|
||||
#version: v1
|
||||
#connection:
|
||||
smb:
|
||||
#version: v1
|
||||
#connection:
|
||||
system:
|
||||
#version: v1alpha1
|
||||
#connection:
|
||||
iscsi:
|
||||
#version: v1alpha2
|
||||
#connection:
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
#
|
||||
# these SHOULD NOT be used
|
||||
# they are here for documentation purposes only and are likely to:
|
||||
# - be removed
|
||||
# - break things
|
||||
#
|
||||
|
||||
_private:
|
||||
csi:
|
||||
volume:
|
||||
derivedContext:
|
||||
# driver left blank is used to auto select
|
||||
driver: memory # strictly to facilitate testing
|
||||
#driver: kubernetes
|
||||
idHash:
|
||||
strategy: crc16
|
||||
#strategy: crc32
|
||||
#strategy: md5
|
||||
|
|
@ -10,9 +10,9 @@ httpConnection:
|
|||
session: "democratic-csi"
|
||||
serialize: true
|
||||
|
||||
synology:
|
||||
# choose the proper volume for your system
|
||||
volume: /volume1
|
||||
# Choose the DSM volume this driver operates on. The default value is /volume1.
|
||||
# synology:
|
||||
# volume: /volume1
|
||||
|
||||
iscsi:
|
||||
targetPortal: "server[:port]"
|
||||
|
|
@ -31,7 +31,12 @@ iscsi:
|
|||
# documented below are several blocks
|
||||
# pick the option appropriate for you based on what your backing fs is and desired features
|
||||
# you do not need to alter dev_attribs under normal circumstances but they may be altered in advanced use-cases
|
||||
# These options can also be configured per storage-class:
|
||||
# See https://github.com/democratic-csi/democratic-csi/blob/master/docs/storage-class-parameters.md
|
||||
lunTemplate:
|
||||
# can be static value or handlebars template
|
||||
#description: "{{ parameters.[csi.storage.k8s.io/pvc/namespace] }}-{{ parameters.[csi.storage.k8s.io/pvc/name] }}"
|
||||
|
||||
# btrfs thin provisioning
|
||||
type: "BLUN"
|
||||
# tpws = Hardware-assisted zeroing
|
||||
|
|
|
|||
|
|
@ -36,8 +36,8 @@ zfs:
|
|||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0777"
|
||||
datasetPermissionsUser: root
|
||||
datasetPermissionsGroup: root
|
||||
datasetPermissionsUser: 0
|
||||
datasetPermissionsGroup: 0
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
#- "-m u:kube:full_set:allow"
|
||||
|
|
@ -48,6 +48,7 @@ nfs:
|
|||
shareStrategy: "setDatasetProperties"
|
||||
shareStrategySetDatasetProperties:
|
||||
properties:
|
||||
#sharenfs: "rw,no_subtree_check,no_root_squash"
|
||||
sharenfs: "on"
|
||||
# share: ""
|
||||
shareHost: "server address"
|
||||
|
|
|
|||
|
|
@ -0,0 +1,57 @@
|
|||
driver: zfs-generic-smb
|
||||
sshConnection:
|
||||
host: server address
|
||||
port: 22
|
||||
username: root
|
||||
# use either password or key
|
||||
password: ""
|
||||
privateKey: |
|
||||
-----BEGIN RSA PRIVATE KEY-----
|
||||
...
|
||||
-----END RSA PRIVATE KEY-----
|
||||
|
||||
zfs:
|
||||
# can be used to override defaults if necessary
|
||||
# the example below is useful for TrueNAS 12
|
||||
#cli:
|
||||
# sudoEnabled: true
|
||||
# paths:
|
||||
# zfs: /usr/local/sbin/zfs
|
||||
# zpool: /usr/local/sbin/zpool
|
||||
# sudo: /usr/local/bin/sudo
|
||||
# chroot: /usr/sbin/chroot
|
||||
|
||||
# can be used to set arbitrary values on the dataset/zvol
|
||||
# can use handlebars templates with the parameters from the storage class/CO
|
||||
datasetProperties:
|
||||
#aclmode: restricted
|
||||
#aclinherit: passthrough
|
||||
#acltype: nfsv4
|
||||
casesensitivity: insensitive
|
||||
|
||||
datasetParentName: tank/k8s/test
|
||||
# do NOT make datasetParentName and detachedSnapshotsDatasetParentName overlap
|
||||
# they may be siblings, but neither should be nested in the other
|
||||
detachedSnapshotsDatasetParentName: tank/k8s/test-snapshots
|
||||
|
||||
datasetEnableQuotas: true
|
||||
datasetEnableReservation: false
|
||||
datasetPermissionsMode: "0770"
|
||||
datasetPermissionsUser: smbroot
|
||||
datasetPermissionsGroup: smbroot
|
||||
|
||||
#datasetPermissionsAclsBinary: nfs4_setfacl
|
||||
#datasetPermissionsAcls:
|
||||
#- "-m everyone@:full_set:allow"
|
||||
#- -s group@:modify_set:fd:allow
|
||||
#- -a owner@:full_set:fd:allow
|
||||
|
||||
smb:
|
||||
# https://docs.oracle.com/cd/E23824_01/html/821-1448/gayne.html
|
||||
# https://www.hiroom2.com/2016/05/18/ubuntu-16-04-share-zfs-storage-via-nfs-smb/
|
||||
shareStrategy: "setDatasetProperties"
|
||||
shareStrategySetDatasetProperties:
|
||||
properties:
|
||||
sharesmb: "on"
|
||||
# share: ""
|
||||
shareHost: "server address"
|
||||
File diff suppressed because it is too large
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "democratic-csi",
|
||||
"version": "1.6.3",
|
||||
"version": "1.7.0",
|
||||
"description": "kubernetes csi driver framework",
|
||||
"main": "bin/democratic-csi",
|
||||
"scripts": {
|
||||
|
|
@ -22,8 +22,9 @@
|
|||
"@grpc/proto-loader": "^0.6.0",
|
||||
"@kubernetes/client-node": "^0.16.3",
|
||||
"async-mutex": "^0.3.1",
|
||||
"axios": "^0.26.1",
|
||||
"axios": "^0.27.2",
|
||||
"bunyan": "^1.8.15",
|
||||
"fs-extra": "^10.1.0",
|
||||
"handlebars": "^4.7.7",
|
||||
"js-yaml": "^4.0.0",
|
||||
"lodash": "^4.17.21",
|
||||
|
|
|
|||
|
|
@ -3,6 +3,8 @@ const { CsiBaseDriver } = require("../index");
|
|||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const cp = require("child_process");
|
||||
const fs = require("fs");
|
||||
const fse = require("fs-extra");
|
||||
const path = require("path");
|
||||
const semver = require("semver");
|
||||
|
||||
/**
|
||||
|
|
@ -230,9 +232,14 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
async getDirectoryUsage(path) {
|
||||
let result = await this.exec("du", ["-s", "--block-size=1", path]);
|
||||
let size = result.stdout.split("\t", 1)[0];
|
||||
return size;
|
||||
if (this.getNodeIsWindows()) {
|
||||
this.ctx.logger.warn("du not implemented on windows");
|
||||
return 0;
|
||||
} else {
|
||||
let result = await this.exec("du", ["-s", "--block-size=1", path]);
|
||||
let size = result.stdout.split("\t", 1)[0];
|
||||
return size;
|
||||
}
|
||||
}
|
||||
|
||||
exec(command, args, options = {}) {
|
||||
|
|
@ -297,20 +304,39 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
async cloneDir(source_path, target_path) {
|
||||
await this.exec("mkdir", ["-p", target_path]);
|
||||
if (this.getNodeIsWindows()) {
|
||||
fse.copySync(
|
||||
this.stripTrailingSlash(source_path),
|
||||
this.stripTrailingSlash(target_path),
|
||||
{
|
||||
overwrite: true,
|
||||
dereference: true,
|
||||
preserveTimestamps: true,
|
||||
//errorOnExist: true,
|
||||
}
|
||||
);
|
||||
} else {
|
||||
await this.createDir(target_path);
|
||||
|
||||
/**
|
||||
* trailing / is important
|
||||
* rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/
|
||||
*/
|
||||
await this.exec("rsync", [
|
||||
"-a",
|
||||
this.stripTrailingSlash(source_path) + "/",
|
||||
this.stripTrailingSlash(target_path) + "/",
|
||||
]);
|
||||
/**
|
||||
* trailing / is important
|
||||
* rsync -a /mnt/storage/s/foo/ /mnt/storage/v/PVC-111/
|
||||
*/
|
||||
await this.exec("rsync", [
|
||||
"-a",
|
||||
this.stripTrailingSlash(source_path) + "/",
|
||||
this.stripTrailingSlash(target_path) + "/",
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
async getAvailableSpaceAtPath(path) {
|
||||
// https://www.npmjs.com/package/diskusage
|
||||
// https://www.npmjs.com/package/check-disk-space
|
||||
if (this.getNodeIsWindows()) {
|
||||
this.ctx.logger.warn("df not implemented on windows");
|
||||
return 0;
|
||||
}
|
||||
//df --block-size=1 --output=avail /mnt/storage/
|
||||
// Avail
|
||||
//1481334328
|
||||
|
|
@ -325,11 +351,14 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
async createDir(path) {
|
||||
await this.exec("mkdir", ["-p", path]);
|
||||
fs.mkdirSync(path, {
|
||||
recursive: true,
|
||||
mode: "755",
|
||||
});
|
||||
}
|
||||
|
||||
async deleteDir(path) {
|
||||
await this.exec("rm", ["-rf", path]);
|
||||
fs.rmSync(path, { recursive: true, force: true });
|
||||
|
||||
return;
|
||||
|
||||
|
|
@ -346,7 +375,40 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
async directoryExists(path) {
|
||||
return fs.existsSync(path);
|
||||
let r;
|
||||
r = fs.existsSync(path);
|
||||
if (!r) {
|
||||
return r;
|
||||
}
|
||||
|
||||
if (!fs.statSync(path).isDirectory()) {
|
||||
throw new Error(`path [${path}] exists but is not a directory`);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Have to be careful with the logic here as the controller could be running
|
||||
* on win32 for *-client vs local-hostpath
|
||||
*
|
||||
* @param {*} path
|
||||
* @returns
|
||||
*/
|
||||
async normalizePath(path) {
|
||||
if (process.platform == "win32") {
|
||||
return await this.noramlizePathWin32(path);
|
||||
} else {
|
||||
return await this.normalizePathPosix(path);
|
||||
}
|
||||
}
|
||||
|
||||
async normalizePathPosix(p) {
|
||||
return p.replaceAll(path.win32.sep, path.posix.sep);
|
||||
}
|
||||
|
||||
async noramlizePathWin32(p) {
|
||||
return p.replaceAll(path.posix.sep, path.win32.sep);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -441,7 +503,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
//let volume_content_source_volume_id;
|
||||
|
||||
// create target dir
|
||||
response = await driver.exec("mkdir", ["-p", volume_path]);
|
||||
await driver.createDir(volume_path);
|
||||
|
||||
// create dataset
|
||||
if (volume_content_source) {
|
||||
|
|
@ -476,7 +538,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
driver.ctx.logger.debug("controller source path: %s", source_path);
|
||||
response = await driver.cloneDir(source_path, volume_path);
|
||||
await driver.cloneDir(source_path, volume_path);
|
||||
}
|
||||
|
||||
// set mode
|
||||
|
|
@ -486,10 +548,7 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
this.options[config_key].dirPermissionsMode,
|
||||
volume_path
|
||||
);
|
||||
response = await driver.exec("chmod", [
|
||||
this.options[config_key].dirPermissionsMode,
|
||||
volume_path,
|
||||
]);
|
||||
fs.chmodSync(volume_path, this.options[config_key].dirPermissionsMode);
|
||||
}
|
||||
|
||||
// set ownership
|
||||
|
|
@ -503,16 +562,20 @@ class ControllerClientCommonDriver extends CsiBaseDriver {
|
|||
this.options[config_key].dirPermissionsGroup,
|
||||
volume_path
|
||||
);
|
||||
response = await driver.exec("chown", [
|
||||
(this.options[config_key].dirPermissionsUser
|
||||
? this.options[config_key].dirPermissionsUser
|
||||
: "") +
|
||||
":" +
|
||||
(this.options[config_key].dirPermissionsGroup
|
||||
? this.options[config_key].dirPermissionsGroup
|
||||
: ""),
|
||||
volume_path,
|
||||
]);
|
||||
if (this.getNodeIsWindows()) {
|
||||
driver.ctx.logger.warn("chown not implemented on windows");
|
||||
} else {
|
||||
await driver.exec("chown", [
|
||||
(this.options[config_key].dirPermissionsUser
|
||||
? this.options[config_key].dirPermissionsUser
|
||||
: "") +
|
||||
":" +
|
||||
(this.options[config_key].dirPermissionsGroup
|
||||
? this.options[config_key].dirPermissionsGroup
|
||||
: ""),
|
||||
volume_path,
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
let volume_context = driver.getVolumeContext(name);
|
||||
|
|
|
|||
|
|
@ -4,10 +4,81 @@ const https = require("https");
|
|||
const { axios_request, stringify } = require("../../../utils/general");
|
||||
const Mutex = require("async-mutex").Mutex;
|
||||
const registry = require("../../../utils/registry");
|
||||
const { GrpcError, grpc } = require("../../../utils/grpc");
|
||||
|
||||
const USER_AGENT = "democratic-csi";
|
||||
const __REGISTRY_NS__ = "SynologyHttpClient";
|
||||
|
||||
const SYNO_ERRORS = {
|
||||
400: {
|
||||
status: grpc.status.UNAUTHENTICATED,
|
||||
message: "Failed to authenticate to the Synology DSM.",
|
||||
},
|
||||
407: {
|
||||
status: grpc.status.UNAUTHENTICATED,
|
||||
message:
|
||||
"IP has been blocked to the Synology DSM due to too many failed attempts.",
|
||||
},
|
||||
18990002: {
|
||||
status: grpc.status.RESOURCE_EXHAUSTED,
|
||||
message: "The synology volume is out of disk space.",
|
||||
},
|
||||
18990318: {
|
||||
status: grpc.status.INVALID_ARGUMENT,
|
||||
message:
|
||||
"The requested lun type is incompatible with the Synology filesystem.",
|
||||
},
|
||||
18990538: {
|
||||
status: grpc.status.ALREADY_EXISTS,
|
||||
message: "A LUN with this name already exists.",
|
||||
},
|
||||
18990541: {
|
||||
status: grpc.status.RESOURCE_EXHAUSTED,
|
||||
message: "The maximum number of LUNS has been reached.",
|
||||
},
|
||||
18990542: {
|
||||
status: grpc.status.RESOURCE_EXHAUSTED,
|
||||
message: "The maximum number of iSCSI targets has been reached.",
|
||||
},
|
||||
18990708: {
|
||||
status: grpc.status.INVALID_ARGUMENT,
|
||||
message: "Bad target auth info.",
|
||||
},
|
||||
18990744: {
|
||||
status: grpc.status.ALREADY_EXISTS,
|
||||
message: "An iSCSI target with this name already exists.",
|
||||
},
|
||||
18990532: { status: grpc.status.NOT_FOUND, message: "No such snapshot." },
|
||||
18990500: { status: grpc.status.INVALID_ARGUMENT, message: "Bad LUN type" },
|
||||
18990543: {
|
||||
status: grpc.status.RESOURCE_EXHAUSTED,
|
||||
message: "Maximum number of snapshots reached.",
|
||||
},
|
||||
18990635: {
|
||||
status: grpc.status.INVALID_ARGUMENT,
|
||||
message: "Invalid ioPolicy.",
|
||||
},
|
||||
};
|
||||
|
||||
class SynologyError extends GrpcError {
|
||||
constructor(code, httpCode = undefined) {
|
||||
super(0, "");
|
||||
this.synoCode = code;
|
||||
this.httpCode = httpCode;
|
||||
if (code > 0) {
|
||||
const error = SYNO_ERRORS[code];
|
||||
this.code = error && error.status ? error.status : grpc.status.UNKNOWN;
|
||||
this.message =
|
||||
error && error.message
|
||||
? error.message
|
||||
: `An unknown error occurred when executing a synology command (code = ${code}).`;
|
||||
} else {
|
||||
this.code = grpc.status.UNKNOWN;
|
||||
this.message = `The synology webserver returned a status code ${httpCode}`;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
class SynologyHttpClient {
|
||||
constructor(options = {}) {
|
||||
this.options = JSON.parse(JSON.stringify(options));
|
||||
|
|
@ -44,32 +115,49 @@ class SynologyHttpClient {
|
|||
}
|
||||
|
||||
log_response(error, response, body, options) {
|
||||
let prop;
|
||||
let val;
|
||||
|
||||
prop = "auth.username";
|
||||
val = _.get(options, prop, false);
|
||||
if (val) {
|
||||
_.set(options, prop, "redacted");
|
||||
const cleansedBody = JSON.parse(stringify(body));
|
||||
const cleansedOptions = JSON.parse(stringify(options));
|
||||
// This function handles arrays and objects
|
||||
function recursiveCleanse(obj) {
|
||||
for (const k in obj) {
|
||||
if (typeof obj[k] == "object" && obj[k] !== null) {
|
||||
recursiveCleanse(obj[k]);
|
||||
} else {
|
||||
if (
|
||||
[
|
||||
"account",
|
||||
"passwd",
|
||||
"username",
|
||||
"password",
|
||||
"_sid",
|
||||
"sid",
|
||||
"Authorization",
|
||||
"authorization",
|
||||
"user",
|
||||
"mutual_user",
|
||||
"mutual_password",
|
||||
].includes(k)
|
||||
) {
|
||||
obj[k] = "redacted";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
recursiveCleanse(cleansedBody);
|
||||
recursiveCleanse(cleansedOptions);
|
||||
|
||||
prop = "auth.password";
|
||||
val = _.get(options, prop, false);
|
||||
if (val) {
|
||||
_.set(options, prop, "redacted");
|
||||
}
|
||||
delete cleansedOptions.httpAgent;
|
||||
delete cleansedOptions.httpsAgent;
|
||||
|
||||
prop = "headers.Authorization";
|
||||
val = _.get(options, prop, false);
|
||||
if (val) {
|
||||
_.set(options, prop, "redacted");
|
||||
}
|
||||
|
||||
this.logger.debug("SYNOLOGY HTTP REQUEST: " + stringify(options));
|
||||
this.logger.debug("SYNOLOGY HTTP REQUEST: " + stringify(cleansedOptions));
|
||||
this.logger.debug("SYNOLOGY HTTP ERROR: " + error);
|
||||
this.logger.debug("SYNOLOGY HTTP STATUS: " + response.statusCode);
|
||||
this.logger.debug("SYNOLOGY HTTP HEADERS: " + stringify(response.headers));
|
||||
this.logger.debug("SYNOLOGY HTTP BODY: " + stringify(body));
|
||||
this.logger.debug(
|
||||
"SYNOLOGY HTTP STATUS: " + _.get(response, "statusCode", "")
|
||||
);
|
||||
this.logger.debug(
|
||||
"SYNOLOGY HTTP HEADERS: " + stringify(_.get(response, "headers", ""))
|
||||
);
|
||||
this.logger.debug("SYNOLOGY HTTP BODY: " + stringify(cleansedBody));
|
||||
}
|
||||
|
||||
async do_request(method, path, data = {}, options = {}) {
|
||||
|
|
@ -149,7 +237,7 @@ class SynologyHttpClient {
|
|||
}
|
||||
|
||||
if (response.statusCode > 299 || response.statusCode < 200) {
|
||||
reject(response);
|
||||
reject(new SynologyError(null, response.statusCode));
|
||||
}
|
||||
|
||||
if (response.body.success === false) {
|
||||
|
|
@ -157,7 +245,9 @@ class SynologyHttpClient {
|
|||
if (response.body.error.code == 119 && sid == client.sid) {
|
||||
client.sid = null;
|
||||
}
|
||||
reject(response);
|
||||
reject(
|
||||
new SynologyError(response.body.error.code, response.statusCode)
|
||||
);
|
||||
}
|
||||
|
||||
resolve(response);
|
||||
|
|
@ -293,19 +383,19 @@ class SynologyHttpClient {
|
|||
return snapshots;
|
||||
}
|
||||
|
||||
async GetSnapshotByLunIDAndName(lun_id, name) {
|
||||
async GetSnapshotByLunUUIDAndName(lun_uuid, name) {
|
||||
const get_snapshot_info = {
|
||||
lid: lun_id, //check?
|
||||
api: "SYNO.Core.Storage.iSCSILUN",
|
||||
method: "load_snapshot",
|
||||
api: "SYNO.Core.ISCSI.LUN",
|
||||
method: "list_snapshot",
|
||||
version: 1,
|
||||
src_lun_uuid: JSON.stringify(lun_uuid),
|
||||
};
|
||||
|
||||
let response = await this.do_request("GET", "entry.cgi", get_snapshot_info);
|
||||
|
||||
if (response.body.data) {
|
||||
let snapshot = response.body.data.find((i) => {
|
||||
return i.desc == name;
|
||||
if (response.body.data.snapshots) {
|
||||
let snapshot = response.body.data.snapshots.find((i) => {
|
||||
return i.description == name;
|
||||
});
|
||||
|
||||
if (snapshot) {
|
||||
|
|
@ -314,18 +404,18 @@ class SynologyHttpClient {
|
|||
}
|
||||
}
|
||||
|
||||
async GetSnapshotByLunIDAndSnapshotUUID(lun_id, snapshot_uuid) {
|
||||
async GetSnapshotByLunUUIDAndSnapshotUUID(lun_uuid, snapshot_uuid) {
|
||||
const get_snapshot_info = {
|
||||
lid: lun_id, //check?
|
||||
api: "SYNO.Core.Storage.iSCSILUN",
|
||||
method: "load_snapshot",
|
||||
api: "SYNO.Core.ISCSI.LUN",
|
||||
method: "list_snapshot",
|
||||
version: 1,
|
||||
src_lun_uuid: JSON.stringify(lun_uuid),
|
||||
};
|
||||
|
||||
let response = await this.do_request("GET", "entry.cgi", get_snapshot_info);
|
||||
|
||||
if (response.body.data) {
|
||||
let snapshot = response.body.data.find((i) => {
|
||||
if (response.body.data.snapshots) {
|
||||
let snapshot = response.body.data.snapshots.find((i) => {
|
||||
return i.uuid == snapshot_uuid;
|
||||
});
|
||||
|
||||
|
|
@ -412,7 +502,7 @@ class SynologyHttpClient {
|
|||
response = await this.do_request("GET", "entry.cgi", iscsi_lun_create);
|
||||
return response.body.data.uuid;
|
||||
} catch (err) {
|
||||
if ([18990538].includes(err.body.error.code)) {
|
||||
if (err.synoCode === 18990538) {
|
||||
response = await this.do_request("GET", "entry.cgi", lun_list);
|
||||
let lun = response.body.data.luns.find((i) => {
|
||||
return i.name == iscsi_lun_create.name;
|
||||
|
|
@ -503,7 +593,7 @@ class SynologyHttpClient {
|
|||
|
||||
return response.body.data.target_id;
|
||||
} catch (err) {
|
||||
if ([18990744].includes(err.body.error.code)) {
|
||||
if (err.synoCode === 18990744) {
|
||||
//do lookup
|
||||
const iscsi_target_list = {
|
||||
api: "SYNO.Core.ISCSI.Target",
|
||||
|
|
@ -549,7 +639,7 @@ class SynologyHttpClient {
|
|||
/**
|
||||
* 18990710 = non-existent
|
||||
*/
|
||||
//if (![18990710].includes(err.body.error.code)) {
|
||||
//if (err.synoCode !== 18990710) {
|
||||
throw err;
|
||||
//}
|
||||
}
|
||||
|
|
@ -572,20 +662,34 @@ class SynologyHttpClient {
|
|||
);
|
||||
}
|
||||
|
||||
async CreateClonedVolume(src_lun_uuid, dst_lun_name) {
|
||||
async CreateClonedVolume(
|
||||
src_lun_uuid,
|
||||
dst_lun_name,
|
||||
dst_location,
|
||||
description
|
||||
) {
|
||||
const create_cloned_volume = {
|
||||
api: "SYNO.Core.ISCSI.LUN",
|
||||
version: 1,
|
||||
method: "clone",
|
||||
src_lun_uuid: JSON.stringify(src_lun_uuid), // src lun uuid
|
||||
dst_lun_name: dst_lun_name, // dst lun name
|
||||
dst_location: dst_location,
|
||||
is_same_pool: true, // always true? string?
|
||||
clone_type: "democratic-csi", // check
|
||||
};
|
||||
if (description) {
|
||||
create_cloned_volume.description = description;
|
||||
}
|
||||
return await this.do_request("GET", "entry.cgi", create_cloned_volume);
|
||||
}
|
||||
|
||||
async CreateVolumeFromSnapshot(src_lun_uuid, snapshot_uuid, cloned_lun_name) {
|
||||
async CreateVolumeFromSnapshot(
|
||||
src_lun_uuid,
|
||||
snapshot_uuid,
|
||||
cloned_lun_name,
|
||||
description
|
||||
) {
|
||||
const create_volume_from_snapshot = {
|
||||
api: "SYNO.Core.ISCSI.LUN",
|
||||
version: 1,
|
||||
|
|
@ -595,6 +699,9 @@ class SynologyHttpClient {
|
|||
cloned_lun_name: cloned_lun_name, // cloned lun name
|
||||
clone_type: "democratic-csi", // check
|
||||
};
|
||||
if (description) {
|
||||
create_volume_from_snapshot.description = description;
|
||||
}
|
||||
return await this.do_request(
|
||||
"GET",
|
||||
"entry.cgi",
|
||||
|
|
|
|||
|
|
@ -1,9 +1,12 @@
|
|||
const _ = require("lodash");
|
||||
const { CsiBaseDriver } = require("../index");
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const Handlebars = require("handlebars");
|
||||
const registry = require("../../utils/registry");
|
||||
const SynologyHttpClient = require("./http").SynologyHttpClient;
|
||||
const semver = require("semver");
|
||||
const sleep = require("../../utils/general").sleep;
|
||||
const yaml = require("js-yaml");
|
||||
|
||||
const __REGISTRY_NS__ = "ControllerSynologyDriver";
|
||||
|
||||
|
|
@ -142,6 +145,37 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
getObjectFromDevAttribs(list = []) {
|
||||
if (!list) {
|
||||
return {};
|
||||
}
|
||||
return list.reduce(
|
||||
(obj, item) => Object.assign(obj, { [item.dev_attrib]: item.enable }),
|
||||
{}
|
||||
);
|
||||
}
|
||||
|
||||
getDevAttribsFromObject(obj, keepNull = false) {
|
||||
return Object.entries(obj)
|
||||
.filter((e) => keepNull || e[1] != null)
|
||||
.map((e) => ({ dev_attrib: e[0], enable: e[1] }));
|
||||
}
|
||||
|
||||
parseParameterYamlData(data, fieldHint = "") {
|
||||
try {
|
||||
return yaml.load(data);
|
||||
} catch (err) {
|
||||
if (err instanceof yaml.YAMLException) {
|
||||
throw new GrpcError(
|
||||
grpc.status.INVALID_ARGUMENT,
|
||||
`${fieldHint} not a valid YAML document.`.trim()
|
||||
);
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buildIscsiName(name) {
|
||||
let iscsiName = name;
|
||||
if (this.options.iscsi.namePrefix) {
|
||||
|
|
@ -155,6 +189,25 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
return iscsiName.toLowerCase();
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the value for the 'location' parameter indicating on which volume
|
||||
* a LUN is to be created.
|
||||
*
|
||||
* Reads `synology.volume` from the driver options; when unset it falls back to `/volume1`.
|
||||
* @returns {String} The location of the volume.
|
||||
*/
|
||||
getLocation() {
|
||||
let location = _.get(this.options, "synology.volume");
|
||||
if (!location) {
|
||||
location = "volume1";
|
||||
}
|
||||
if (!location.startsWith("/")) {
|
||||
location = "/" + location;
|
||||
}
|
||||
return location;
|
||||
}
|
||||
|
||||
assertCapabilities(capabilities) {
|
||||
const driverResourceType = this.getDriverResourceType();
|
||||
this.ctx.logger.verbose("validating capabilities: %j", capabilities);
|
||||
|
|
@ -171,7 +224,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
|
||||
if (
|
||||
capability.mount.fs_type &&
|
||||
!["nfs", "cifs"].includes(capability.mount.fs_type)
|
||||
!GeneralUtils.default_supported_file_filesystems().includes(
|
||||
capability.mount.fs_type
|
||||
)
|
||||
) {
|
||||
message = `invalid fs_type ${capability.mount.fs_type}`;
|
||||
return false;
|
||||
|
|
@ -198,7 +253,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
if (capability.access_type == "mount") {
|
||||
if (
|
||||
capability.mount.fs_type &&
|
||||
!["btrfs", "ext3", "ext4", "ext4dev", "xfs"].includes(
|
||||
!GeneralUtils.default_supported_block_filesystems().includes(
|
||||
capability.mount.fs_type
|
||||
)
|
||||
) {
|
||||
|
|
@ -310,6 +365,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
let volume_context = {};
|
||||
const normalizedParameters = driver.getNormalizedParameters(
|
||||
call.request.parameters
|
||||
);
|
||||
switch (driver.getDriverShareType()) {
|
||||
case "nfs":
|
||||
// TODO: create volume here
|
||||
|
|
@ -327,12 +385,53 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
break;
|
||||
case "iscsi":
|
||||
let iscsiName = driver.buildIscsiName(name);
|
||||
let lunTemplate;
|
||||
let targetTemplate;
|
||||
let data;
|
||||
let target;
|
||||
let lun_mapping;
|
||||
let lun_uuid;
|
||||
let existingLun;
|
||||
|
||||
lunTemplate = Object.assign(
|
||||
{},
|
||||
_.get(driver.options, "iscsi.lunTemplate", {}),
|
||||
driver.parseParameterYamlData(
|
||||
_.get(normalizedParameters, "lunTemplate", "{}"),
|
||||
"parameters.lunTemplate"
|
||||
),
|
||||
driver.parseParameterYamlData(
|
||||
_.get(call.request, "secrets.lunTemplate", "{}"),
|
||||
"secrets.lunTemplate"
|
||||
)
|
||||
);
|
||||
targetTemplate = Object.assign(
|
||||
{},
|
||||
_.get(driver.options, "iscsi.targetTemplate", {}),
|
||||
driver.parseParameterYamlData(
|
||||
_.get(normalizedParameters, "targetTemplate", "{}"),
|
||||
"parameters.targetTemplate"
|
||||
),
|
||||
driver.parseParameterYamlData(
|
||||
_.get(call.request, "secrets.targetTemplate", "{}"),
|
||||
"secrets.targetTemplate"
|
||||
)
|
||||
);
|
||||
|
||||
// render the template for description
|
||||
if (lunTemplate.description) {
|
||||
lunTemplate.description = Handlebars.compile(lunTemplate.description)(
|
||||
{
|
||||
name: call.request.name,
|
||||
parameters: call.request.parameters,
|
||||
csi: {
|
||||
name: this.ctx.args.csiName,
|
||||
version: this.ctx.args.csiVersion,
|
||||
},
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
// ensure volumes with the same name being requested a 2nd time but with a different size fails
|
||||
try {
|
||||
let lun = await httpClient.GetLunByName(iscsiName);
|
||||
|
|
@ -361,13 +460,12 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
|
||||
if (volume_content_source) {
|
||||
let src_lun_uuid;
|
||||
let src_lun_id;
|
||||
switch (volume_content_source.type) {
|
||||
case "snapshot":
|
||||
let parts = volume_content_source.snapshot.snapshot_id.split("/");
|
||||
|
||||
src_lun_id = parts[2];
|
||||
if (!src_lun_id) {
|
||||
src_lun_uuid = parts[2];
|
||||
if (!src_lun_uuid) {
|
||||
throw new GrpcError(
|
||||
grpc.status.NOT_FOUND,
|
||||
`invalid snapshot_id: ${volume_content_source.snapshot.snapshot_id}`
|
||||
|
|
@ -382,13 +480,17 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
);
|
||||
}
|
||||
|
||||
let src_lun = await httpClient.GetLunByID(src_lun_id);
|
||||
src_lun_uuid = src_lun.uuid;
|
||||
// This is for backwards compatibility. Previous versions of this driver used the LUN ID instead of the
|
||||
// UUID. If this is the case we need to get the LUN UUID before we can proceed.
|
||||
if (!src_lun_uuid.includes("-")) {
|
||||
src_lun_uuid = (await httpClient.GetLunByID(src_lun_uuid)).uuid;
|
||||
}
|
||||
|
||||
let snapshot = await httpClient.GetSnapshotByLunIDAndSnapshotUUID(
|
||||
src_lun_id,
|
||||
snapshot_uuid
|
||||
);
|
||||
let snapshot =
|
||||
await httpClient.GetSnapshotByLunUUIDAndSnapshotUUID(
|
||||
src_lun_uuid,
|
||||
snapshot_uuid
|
||||
);
|
||||
if (!snapshot) {
|
||||
throw new GrpcError(
|
||||
grpc.status.NOT_FOUND,
|
||||
|
|
@ -401,7 +503,8 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
await httpClient.CreateVolumeFromSnapshot(
|
||||
src_lun_uuid,
|
||||
snapshot_uuid,
|
||||
iscsiName
|
||||
iscsiName,
|
||||
lunTemplate.description
|
||||
);
|
||||
}
|
||||
break;
|
||||
|
|
@ -425,7 +528,12 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
`invalid volume_id: ${volume_content_source.volume.volume_id}`
|
||||
);
|
||||
}
|
||||
await httpClient.CreateClonedVolume(src_lun_uuid, iscsiName);
|
||||
await httpClient.CreateClonedVolume(
|
||||
src_lun_uuid,
|
||||
iscsiName,
|
||||
driver.getLocation(),
|
||||
lunTemplate.description
|
||||
);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
@ -444,20 +552,22 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
}
|
||||
} else {
|
||||
// create lun
|
||||
data = Object.assign({}, driver.options.iscsi.lunTemplate, {
|
||||
data = Object.assign({}, lunTemplate, {
|
||||
name: iscsiName,
|
||||
location: driver.options.synology.volume,
|
||||
location: driver.getLocation(),
|
||||
size: capacity_bytes,
|
||||
});
|
||||
|
||||
lun_uuid = await httpClient.CreateLun(data);
|
||||
}
|
||||
|
||||
// create target
|
||||
let iqn = driver.options.iscsi.baseiqn + iscsiName;
|
||||
data = Object.assign({}, driver.options.iscsi.targetTemplate, {
|
||||
data = Object.assign({}, targetTemplate, {
|
||||
name: iscsiName,
|
||||
iqn,
|
||||
});
|
||||
|
||||
let target_id = await httpClient.CreateTarget(data);
|
||||
//target = await httpClient.GetTargetByTargetID(target_id);
|
||||
target = await httpClient.GetTargetByIQN(iqn);
|
||||
|
|
@ -609,12 +719,12 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
|
||||
let waitTimeBetweenChecks = settleSeconds * 1000;
|
||||
|
||||
await sleep(waitTimeBetweenChecks);
|
||||
await GeneralUtils.sleep(waitTimeBetweenChecks);
|
||||
lun_uuid = await httpClient.GetLunUUIDByName(iscsiName);
|
||||
|
||||
while (currentCheck <= settleMaxRetries && lun_uuid) {
|
||||
currentCheck++;
|
||||
await sleep(waitTimeBetweenChecks);
|
||||
await GeneralUtils.sleep(waitTimeBetweenChecks);
|
||||
lun_uuid = await httpClient.GetLunUUIDByName(iscsiName);
|
||||
}
|
||||
|
||||
|
|
@ -737,8 +847,9 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
async GetCapacity(call) {
|
||||
const driver = this;
|
||||
const httpClient = await driver.getHttpClient();
|
||||
const location = driver.getLocation();
|
||||
|
||||
if (!driver.options.synology.volume) {
|
||||
if (!location) {
|
||||
throw new GrpcError(
|
||||
grpc.status.FAILED_PRECONDITION,
|
||||
`invalid configuration: missing volume`
|
||||
|
|
@ -753,9 +864,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
let response = await httpClient.GetVolumeInfo(
|
||||
driver.options.synology.volume
|
||||
);
|
||||
let response = await httpClient.GetVolumeInfo(location);
|
||||
return { available_capacity: response.body.data.volume.size_free_byte };
|
||||
}
|
||||
|
||||
|
|
@ -833,6 +942,24 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
);
|
||||
}
|
||||
|
||||
const normalizedParameters = driver.getNormalizedParameters(
|
||||
call.request.parameters
|
||||
);
|
||||
let lunSnapshotTemplate;
|
||||
|
||||
lunSnapshotTemplate = Object.assign(
|
||||
{},
|
||||
_.get(driver.options, "iscsi.lunSnapshotTemplate", {}),
|
||||
driver.parseParameterYamlData(
|
||||
_.get(normalizedParameters, "lunSnapshotTemplate", "{}"),
|
||||
"parameters.lunSnapshotTemplate"
|
||||
),
|
||||
driver.parseParameterYamlData(
|
||||
_.get(call.request, "secrets.lunSnapshotTemplate", "{}"),
|
||||
"secrets.lunSnapshotTemplate"
|
||||
)
|
||||
);
|
||||
|
||||
// check for other snapshots with the same name on other volumes and fail as appropriate
|
||||
// TODO: technically this should only be checking lun/snapshots relevant to this specific install of the driver
|
||||
// but alas an isolation/namespacing mechanism does not exist in synology
|
||||
|
|
@ -848,16 +975,16 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
|
||||
// check for already exists
|
||||
let snapshot;
|
||||
snapshot = await httpClient.GetSnapshotByLunIDAndName(lun.lun_id, name);
|
||||
snapshot = await httpClient.GetSnapshotByLunUUIDAndName(lun.uuid, name);
|
||||
if (!snapshot) {
|
||||
let data = Object.assign({}, driver.options.iscsi.lunSnapshotTemplate, {
|
||||
let data = Object.assign({}, lunSnapshotTemplate, {
|
||||
src_lun_uuid: lun.uuid,
|
||||
taken_by: "democratic-csi",
|
||||
description: name, //check
|
||||
});
|
||||
|
||||
await httpClient.CreateSnapshot(data);
|
||||
snapshot = await httpClient.GetSnapshotByLunIDAndName(lun.lun_id, name);
|
||||
snapshot = await httpClient.GetSnapshotByLunUUIDAndName(lun.uuid, name);
|
||||
|
||||
if (!snapshot) {
|
||||
throw new Error(`failed to create snapshot`);
|
||||
|
|
@ -871,7 +998,7 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
* is needed to create a volume from this snapshot.
|
||||
*/
|
||||
size_bytes: snapshot.total_size,
|
||||
snapshot_id: `/lun/${lun.lun_id}/${snapshot.uuid}`, // add shanpshot_uuid //fixme
|
||||
snapshot_id: `/lun/${lun.uuid}/${snapshot.uuid}`,
|
||||
source_volume_id: source_volume_id,
|
||||
//https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/timestamp.proto
|
||||
creation_time: {
|
||||
|
|
@ -908,8 +1035,8 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
let parts = snapshot_id.split("/");
|
||||
let lun_id = parts[2];
|
||||
if (!lun_id) {
|
||||
let lun_uuid = parts[2];
|
||||
if (!lun_uuid) {
|
||||
return {};
|
||||
}
|
||||
|
||||
|
|
@ -918,9 +1045,14 @@ class ControllerSynologyDriver extends CsiBaseDriver {
|
|||
return {};
|
||||
}
|
||||
|
||||
// TODO: delete snapshot
|
||||
let snapshot = await httpClient.GetSnapshotByLunIDAndSnapshotUUID(
|
||||
lun_id,
|
||||
// This is for backwards compatibility. Previous versions of this driver used the LUN ID instead of the UUID. If
|
||||
// this is the case we need to get the LUN UUID before we can proceed.
|
||||
if (!lun_uuid.includes("-")) {
|
||||
lun_uuid = (await httpClient.GetLunByID(lun_uuid)).uuid;
|
||||
}
|
||||
|
||||
let snapshot = await httpClient.GetSnapshotByLunUUIDAndSnapshotUUID(
|
||||
lun_uuid,
|
||||
snapshot_uuid
|
||||
);
|
||||
|
||||
|
|
|
|||
|
|
@ -1,9 +1,9 @@
|
|||
const _ = require("lodash");
|
||||
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
const registry = require("../../utils/registry");
|
||||
const SshClient = require("../../utils/ssh").SshClient;
|
||||
const sleep = require("../../utils/general").sleep;
|
||||
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
|
|
@ -52,6 +52,7 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
|||
getDriverZfsResourceType() {
|
||||
switch (this.options.driver) {
|
||||
case "zfs-generic-nfs":
|
||||
case "zfs-generic-smb":
|
||||
return "filesystem";
|
||||
case "zfs-generic-iscsi":
|
||||
return "volume";
|
||||
|
|
@ -60,6 +61,24 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
generateSmbShareName(datasetName) {
|
||||
const driver = this;
|
||||
|
||||
driver.ctx.logger.verbose(
|
||||
`generating smb share name for dataset: ${datasetName}`
|
||||
);
|
||||
|
||||
let name = datasetName || "";
|
||||
name = name.replaceAll("/", "_");
|
||||
name = name.replaceAll("-", "_");
|
||||
|
||||
driver.ctx.logger.verbose(
|
||||
`generated smb share name for dataset (${datasetName}): ${name}`
|
||||
);
|
||||
|
||||
return name;
|
||||
}
|
||||
|
||||
/**
|
||||
* should create any necessary share resources
|
||||
* should set the SHARE_VOLUME_CONTEXT_PROPERTY_NAME propery
|
||||
|
|
@ -67,6 +86,7 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
|||
* @param {*} datasetName
|
||||
*/
|
||||
async createShare(call, datasetName) {
|
||||
const driver = this;
|
||||
const zb = await this.getZetabyte();
|
||||
const execClient = this.getExecClient();
|
||||
|
||||
|
|
@ -109,6 +129,41 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
|||
};
|
||||
return volume_context;
|
||||
|
||||
case "zfs-generic-smb":
|
||||
let share;
|
||||
switch (this.options.smb.shareStrategy) {
|
||||
case "setDatasetProperties":
|
||||
for (let key of ["share", "sharesmb"]) {
|
||||
if (
|
||||
this.options.smb.shareStrategySetDatasetProperties.properties[
|
||||
key
|
||||
]
|
||||
) {
|
||||
await zb.zfs.set(datasetName, {
|
||||
[key]:
|
||||
this.options.smb.shareStrategySetDatasetProperties
|
||||
.properties[key],
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
share = driver.generateSmbShareName(datasetName);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
properties = await zb.zfs.get(datasetName, ["mountpoint"]);
|
||||
properties = properties[datasetName];
|
||||
this.ctx.logger.debug("zfs props data: %j", properties);
|
||||
|
||||
volume_context = {
|
||||
node_attach_driver: "smb",
|
||||
server: this.options.smb.shareHost,
|
||||
share,
|
||||
};
|
||||
return volume_context;
|
||||
|
||||
case "zfs-generic-iscsi":
|
||||
let basename;
|
||||
let iscsiName;
|
||||
|
|
@ -176,8 +231,12 @@ class ControllerZfsGenericDriver extends ControllerZfsBaseDriver {
|
|||
}
|
||||
}
|
||||
|
||||
response = await this.targetCliCommand(
|
||||
`
|
||||
await GeneralUtils.retry(
|
||||
3,
|
||||
2000,
|
||||
async () => {
|
||||
await this.targetCliCommand(
|
||||
`
|
||||
# create target
|
||||
cd /iscsi
|
||||
create ${basename}:${iscsiName}
|
||||
|
|
@ -195,6 +254,16 @@ create ${iscsiName} /dev/${extentDiskName}
|
|||
cd /iscsi/${basename}:${iscsiName}/tpg1/luns
|
||||
create /backstores/block/${iscsiName}
|
||||
`
|
||||
);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.stdout && err.stdout.includes("Ran out of input")) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
break;
|
||||
default:
|
||||
|
|
@ -258,7 +327,7 @@ create /backstores/block/${iscsiName}
|
|||
}
|
||||
}
|
||||
}
|
||||
await sleep(2000); // let things settle
|
||||
await GeneralUtils.sleep(2000); // let things settle
|
||||
break;
|
||||
default:
|
||||
throw new GrpcError(
|
||||
|
|
@ -268,6 +337,36 @@ create /backstores/block/${iscsiName}
|
|||
}
|
||||
break;
|
||||
|
||||
case "zfs-generic-smb":
|
||||
switch (this.options.smb.shareStrategy) {
|
||||
case "setDatasetProperties":
|
||||
for (let key of ["share", "sharesmb"]) {
|
||||
if (
|
||||
this.options.smb.shareStrategySetDatasetProperties.properties[
|
||||
key
|
||||
]
|
||||
) {
|
||||
try {
|
||||
await zb.zfs.inherit(datasetName, key);
|
||||
} catch (err) {
|
||||
if (err.toString().includes("dataset does not exist")) {
|
||||
// do nothing
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
await GeneralUtils.sleep(2000); // let things settle
|
||||
break;
|
||||
default:
|
||||
throw new GrpcError(
|
||||
grpc.status.FAILED_PRECONDITION,
|
||||
`invalid configuration: unknown shareStrategy ${this.options.smb.shareStrategy}`
|
||||
);
|
||||
}
|
||||
break;
|
||||
|
||||
case "zfs-generic-iscsi":
|
||||
let basename;
|
||||
let iscsiName;
|
||||
|
|
@ -307,8 +406,12 @@ create /backstores/block/${iscsiName}
|
|||
switch (this.options.iscsi.shareStrategy) {
|
||||
case "targetCli":
|
||||
basename = this.options.iscsi.shareStrategyTargetCli.basename;
|
||||
response = await this.targetCliCommand(
|
||||
`
|
||||
await GeneralUtils.retry(
|
||||
3,
|
||||
2000,
|
||||
async () => {
|
||||
await this.targetCliCommand(
|
||||
`
|
||||
# delete target
|
||||
cd /iscsi
|
||||
delete ${basename}:${iscsiName}
|
||||
|
|
@ -317,7 +420,18 @@ delete ${basename}:${iscsiName}
|
|||
cd /backstores/block
|
||||
delete ${iscsiName}
|
||||
`
|
||||
);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.stdout && err.stdout.includes("Ran out of input")) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
|
|
@ -362,19 +476,19 @@ delete ${iscsiName}
|
|||
|
||||
let command = "sh";
|
||||
let args = ["-c"];
|
||||
let taregetCliCommand = [];
|
||||
taregetCliCommand.push(`echo "${data}"`.trim());
|
||||
taregetCliCommand.push("|");
|
||||
taregetCliCommand.push("targetcli");
|
||||
|
||||
let targetCliArgs = ["targetcli"];
|
||||
if (
|
||||
_.get(this.options, "iscsi.shareStrategyTargetCli.sudoEnabled", false)
|
||||
) {
|
||||
command = "sudo";
|
||||
args.unshift("sh");
|
||||
targetCliArgs.unshift("sudo");
|
||||
}
|
||||
|
||||
args.push("'" + taregetCliCommand.join(" ") + "'");
|
||||
let targetCliCommand = [];
|
||||
targetCliCommand.push(`echo "${data}"`.trim());
|
||||
targetCliCommand.push("|");
|
||||
targetCliCommand.push(targetCliArgs.join(" "));
|
||||
args.push("'" + targetCliCommand.join(" ") + "'");
|
||||
|
||||
let logCommandTmp = command + " " + args.join(" ");
|
||||
let logCommand = "";
|
||||
|
|
@ -405,12 +519,12 @@ delete ${iscsiName}
|
|||
execClient.buildCommand(command, args),
|
||||
options
|
||||
);
|
||||
if (response.code != 0) {
|
||||
throw new Error(JSON.stringify(response));
|
||||
}
|
||||
driver.ctx.logger.verbose(
|
||||
"TargetCLI response: " + JSON.stringify(response)
|
||||
);
|
||||
if (response.code != 0) {
|
||||
throw response;
|
||||
}
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,7 @@
|
|||
const _ = require("lodash");
|
||||
const { ControllerZfsBaseDriver } = require("../controller-zfs");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
const LocalCliExecClient = require("./exec").LocalCliClient;
|
||||
const registry = require("../../utils/registry");
|
||||
const { Zetabyte } = require("../../utils/zfs");
|
||||
|
|
@ -95,7 +96,7 @@ class ControllerZfsLocalDriver extends ControllerZfsBaseDriver {
|
|||
case "filesystem":
|
||||
return ["zfs"];
|
||||
case "volume":
|
||||
return ["btrfs", "ext3", "ext4", "ext4dev", "xfs"];
|
||||
return GeneralUtils.default_supported_block_filesystems();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
const _ = require("lodash");
|
||||
const { CsiBaseDriver } = require("../index");
|
||||
const { GrpcError, grpc } = require("../../utils/grpc");
|
||||
const sleep = require("../../utils/general").sleep;
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
const getLargestNumber = require("../../utils/general").getLargestNumber;
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
|
|
@ -201,9 +201,9 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
const driverZfsResourceType = this.getDriverZfsResourceType();
|
||||
switch (driverZfsResourceType) {
|
||||
case "filesystem":
|
||||
return ["nfs", "cifs"];
|
||||
return GeneralUtils.default_supported_file_filesystems();
|
||||
case "volume":
|
||||
return ["btrfs", "ext3", "ext4", "ext4dev", "xfs"];
|
||||
return GeneralUtils.default_supported_block_filesystems();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -620,6 +620,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
|
||||
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
|
||||
let name = call.request.name;
|
||||
let volume_id = await driver.getVolumeIdFromName(name);
|
||||
let volume_content_source = call.request.volume_content_source;
|
||||
|
||||
if (!datasetParentName) {
|
||||
|
|
@ -710,7 +711,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
* NOTE: avoid the urge to templatize this given the name length limits for zvols
|
||||
* ie: namespace-name may quite easily exceed 58 chars
|
||||
*/
|
||||
const datasetName = datasetParentName + "/" + name;
|
||||
const datasetName = datasetParentName + "/" + volume_id;
|
||||
|
||||
// ensure volumes with the same name being requested a 2nd time but with a different size fails
|
||||
try {
|
||||
|
|
@ -862,7 +863,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
volume_content_source_snapshot_id +
|
||||
"@" +
|
||||
VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX +
|
||||
name;
|
||||
volume_id;
|
||||
}
|
||||
|
||||
driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName);
|
||||
|
|
@ -909,6 +910,12 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
});
|
||||
} else {
|
||||
try {
|
||||
// remove readonly/undesired props
|
||||
let cloneProperties = volumeProperties;
|
||||
delete cloneProperties["aclmode"];
|
||||
delete cloneProperties["aclinherit"];
|
||||
delete cloneProperties["acltype"];
|
||||
delete cloneProperties["casesensitivity"];
|
||||
response = await zb.zfs.clone(fullSnapshotName, datasetName, {
|
||||
properties: volumeProperties,
|
||||
});
|
||||
|
|
@ -971,7 +978,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
volume_content_source_volume_id +
|
||||
"@" +
|
||||
VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX +
|
||||
name;
|
||||
volume_id;
|
||||
|
||||
driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName);
|
||||
|
||||
|
|
@ -1024,9 +1031,15 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
} else {
|
||||
// create clone
|
||||
// zfs origin property contains parent info, ie: pool0/k8s/test/PVC-111@clone-test
|
||||
// remove readonly/undesired props
|
||||
let cloneProperties = volumeProperties;
|
||||
delete cloneProperties["aclmode"];
|
||||
delete cloneProperties["aclinherit"];
|
||||
delete cloneProperties["acltype"];
|
||||
delete cloneProperties["casesensitivity"];
|
||||
try {
|
||||
response = await zb.zfs.clone(fullSnapshotName, datasetName, {
|
||||
properties: volumeProperties,
|
||||
properties: cloneProperties,
|
||||
});
|
||||
} catch (err) {
|
||||
if (err.toString().includes("dataset does not exist")) {
|
||||
|
|
@ -1128,8 +1141,13 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
// TODO: this is an unsafe approach, make it better
|
||||
// probably could see if ^-.*\s and split and then shell escape
|
||||
if (this.options.zfs.datasetPermissionsAcls) {
|
||||
let aclBinary = _.get(
|
||||
driver.options,
|
||||
"zfs.datasetPermissionsAclsBinary",
|
||||
"setfacl"
|
||||
);
|
||||
for (const acl of this.options.zfs.datasetPermissionsAcls) {
|
||||
command = execClient.buildCommand("setfacl", [
|
||||
command = execClient.buildCommand(aclBinary, [
|
||||
acl,
|
||||
properties.mountpoint.value,
|
||||
]);
|
||||
|
|
@ -1147,7 +1165,6 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
case "volume":
|
||||
// set properties
|
||||
|
|
@ -1191,7 +1208,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
|
||||
const res = {
|
||||
volume: {
|
||||
volume_id: name,
|
||||
volume_id,
|
||||
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
|
||||
capacity_bytes:
|
||||
this.options.zfs.datasetEnableQuotas ||
|
||||
|
|
@ -1301,27 +1318,24 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
// NOTE: -R will recursively delete items + dependent filesets
|
||||
// delete dataset
|
||||
try {
|
||||
let max_tries = 5;
|
||||
let sleep_time = 3000;
|
||||
let current_try = 1;
|
||||
let success = false;
|
||||
while (!success && current_try <= max_tries) {
|
||||
try {
|
||||
await GeneralUtils.retry(
|
||||
12,
|
||||
5000,
|
||||
async () => {
|
||||
await zb.zfs.destroy(datasetName, { recurse: true, force: true });
|
||||
success = true;
|
||||
} catch (err) {
|
||||
if (err.toString().includes("dataset is busy")) {
|
||||
current_try++;
|
||||
if (current_try > max_tries) {
|
||||
throw err;
|
||||
} else {
|
||||
await sleep(sleep_time);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (
|
||||
err.toString().includes("dataset is busy") ||
|
||||
err.toString().includes("target is busy")
|
||||
) {
|
||||
return true;
|
||||
}
|
||||
} else {
|
||||
throw err;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
}
|
||||
);
|
||||
} catch (err) {
|
||||
if (err.toString().includes("filesystem has dependent clones")) {
|
||||
throw new GrpcError(
|
||||
|
|
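The destroy path above now goes through the GeneralUtils.retry helper that this commit adds to src/utils/general.js, replacing the hand-rolled while/sleep loop. A minimal sketch of the same pattern, using an illustrative flaky operation in place of zb.zfs.destroy (the attempt count and delay are example values; the driver itself uses 12 tries at 5000 ms):

const GeneralUtils = require("./src/utils/general");

// illustrative stand-in for zb.zfs.destroy(): fails twice with a busy
// error and then succeeds
let attempts = 0;
async function flakyDestroy() {
  attempts++;
  if (attempts < 3) {
    throw new Error("cannot destroy 'tank/example': dataset is busy");
  }
  return "destroyed";
}

async function main() {
  const result = await GeneralUtils.retry(
    5, // attempts
    250, // ms between attempts
    async () => {
      return await flakyDestroy();
    },
    {
      // only busy errors are worth retrying; anything else is rethrown
      retryCondition: (err) => {
        return (
          err.toString().includes("dataset is busy") ||
          err.toString().includes("target is busy")
        );
      },
    }
  );
  console.log(result); // "destroyed" after the third attempt
}

main();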
@ -2190,7 +2204,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
});
|
||||
|
||||
// let things settle down
|
||||
//await sleep(3000);
|
||||
//await GeneralUtils.sleep(3000);
|
||||
} else {
|
||||
try {
|
||||
await zb.zfs.snapshot(fullSnapshotName, {
|
||||
|
|
@ -2198,7 +2212,7 @@ class ControllerZfsBaseDriver extends CsiBaseDriver {
|
|||
});
|
||||
|
||||
// let things settle down
|
||||
//await sleep(3000);
|
||||
//await GeneralUtils.sleep(3000);
|
||||
} catch (err) {
|
||||
if (err.toString().includes("dataset does not exist")) {
|
||||
throw new GrpcError(
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ function factory(ctx, options) {
|
|||
case "synology-iscsi":
|
||||
return new ControllerSynologyDriver(ctx, options);
|
||||
case "zfs-generic-nfs":
|
||||
case "zfs-generic-smb":
|
||||
case "zfs-generic-iscsi":
|
||||
return new ControllerZfsGenericDriver(ctx, options);
|
||||
case "zfs-local-dataset":
|
||||
|
|
|
|||
|
|
@ -4,9 +4,8 @@ const { CsiBaseDriver } = require("../index");
|
|||
const HttpClient = require("./http").Client;
|
||||
const TrueNASApiClient = require("./http/api").Api;
|
||||
const { Zetabyte } = require("../../utils/zfs");
|
||||
const getLargestNumber = require("../../utils/general").getLargestNumber;
|
||||
const registry = require("../../utils/registry");
|
||||
const sleep = require("../../utils/general").sleep;
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
const uuidv4 = require("uuid").v4;
|
||||
|
|
@ -262,7 +261,27 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
break;
|
||||
}
|
||||
|
||||
response = await httpClient.post("/sharing/nfs", share);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.post("/sharing/nfs", share);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
/**
|
||||
* v1 = 201
|
||||
|
|
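The same retryCondition (ECONNRESET, ECONNABORTED, HTTP 504) is repeated at several call sites in this commit. A hedged sketch of factoring it into a shared predicate; the helper name isTransientHttpError is hypothetical and not part of the source:

// hypothetical helper consolidating the repeated retryCondition;
// ECONNRESET / ECONNABORTED are node/axios error codes, 504 is a gateway timeout
function isTransientHttpError(err) {
  if (err.code == "ECONNRESET") {
    return true;
  }
  if (err.code == "ECONNABORTED") {
    return true;
  }
  if (err.response && err.response.statusCode == 504) {
    return true;
  }
  return false;
}

module.exports.isTransientHttpError = isTransientHttpError;

// usage mirroring the share-creation call above (httpClient and share as in the source):
//
// response = await GeneralUtils.retry(3, 1000, async () => {
//   return await httpClient.post("/sharing/nfs", share);
// }, { retryCondition: isTransientHttpError });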
@ -483,7 +502,27 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
break;
|
||||
}
|
||||
|
||||
response = await httpClient.post(endpoint, share);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.post(endpoint, share);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
/**
|
||||
* v1 = 201
|
||||
|
|
@ -1363,7 +1402,27 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
});
|
||||
|
||||
if (deleteAsset) {
|
||||
response = await httpClient.delete(endpoint);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.delete(endpoint);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
// returns a 500 if does not exist
|
||||
// v1 = 204
|
||||
|
|
@ -1444,12 +1503,35 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
});
|
||||
|
||||
if (deleteAsset) {
|
||||
response = await httpClient.delete(endpoint);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.delete(endpoint);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
// returns a 500 if does not exist
|
||||
// v1 = 204
|
||||
// v2 = 200
|
||||
if (![200, 204].includes(response.statusCode)) {
|
||||
if (
|
||||
![200, 204].includes(response.statusCode) &&
|
||||
!JSON.stringify(response.body).includes("does not exist")
|
||||
) {
|
||||
throw new GrpcError(
|
||||
grpc.status.UNKNOWN,
|
||||
`received error deleting smb share - share: ${shareId} code: ${
|
||||
|
|
@ -1477,7 +1559,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
break;
|
||||
case "iscsi":
|
||||
// Delete target
|
||||
// NOTE: deletting a target inherently deletes associated targetgroup(s) and targettoextent(s)
|
||||
// NOTE: deleting a target inherently deletes associated targetgroup(s) and targettoextent(s)
|
||||
|
||||
// Delete extent
|
||||
try {
|
||||
|
|
@ -1565,7 +1647,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
targetId,
|
||||
retries
|
||||
);
|
||||
await sleep(retryWait);
|
||||
await GeneralUtils.sleep(retryWait);
|
||||
response = await httpClient.delete(endpoint);
|
||||
}
|
||||
|
||||
|
|
@ -1958,7 +2040,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
if (capability.access_type == "mount") {
|
||||
if (
|
||||
capability.mount.fs_type &&
|
||||
!["btrfs", "ext3", "ext4", "ext4dev", "xfs"].includes(
|
||||
!GeneralUtils.default_supported_block_filesystems().includes(
|
||||
capability.mount.fs_type
|
||||
)
|
||||
) {
|
||||
|
|
@ -2025,6 +2107,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
*/
|
||||
async Probe(call) {
|
||||
const driver = this;
|
||||
const httpApiClient = await driver.getTrueNASHttpApiClient();
|
||||
|
||||
if (driver.ctx.args.csiMode.includes("controller")) {
|
||||
let datasetParentName = this.getVolumeParentDatasetName() + "/";
|
||||
|
|
@ -2039,6 +2122,14 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
`datasetParentName and detachedSnapshotsDatasetParentName must not overlap`
|
||||
);
|
||||
}
|
||||
|
||||
if (!(await httpApiClient.getIsScale())) {
|
||||
throw new GrpcError(
|
||||
grpc.status.FAILED_PRECONDITION,
|
||||
`driver is only available with TrueNAS SCALE`
|
||||
);
|
||||
}
|
||||
|
||||
return { ready: { value: true } };
|
||||
} else {
|
||||
return { ready: { value: true } };
|
||||
|
|
@ -2066,6 +2157,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
let snapshotParentDatasetName = this.getDetachedSnapshotParentDatasetName();
|
||||
let zvolBlocksize = this.options.zfs.zvolBlocksize || "16K";
|
||||
let name = call.request.name;
|
||||
let volume_id = await driver.getVolumeIdFromName(name);
|
||||
let volume_content_source = call.request.volume_content_source;
|
||||
let minimum_volume_size = await driver.getMinimumVolumeSize();
|
||||
let default_required_bytes = 1073741824;
|
||||
|
|
@ -2171,7 +2263,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
* NOTE: avoid the urge to templatize this given the name length limits for zvols
|
||||
* ie: namespace-name may quite easily exceed 58 chars
|
||||
*/
|
||||
const datasetName = datasetParentName + "/" + name;
|
||||
const datasetName = datasetParentName + "/" + volume_id;
|
||||
|
||||
// ensure volumes with the same name being requested a 2nd time but with a different size fails
|
||||
try {
|
||||
|
|
@ -2326,7 +2418,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
volume_content_source_snapshot_id +
|
||||
"@" +
|
||||
VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX +
|
||||
name;
|
||||
volume_id;
|
||||
}
|
||||
|
||||
driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName);
|
||||
|
|
@ -2378,7 +2470,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
) {
|
||||
job = await httpApiClient.CoreGetJobs({ id: job_id });
|
||||
job = job[0];
|
||||
await sleep(3000);
|
||||
await GeneralUtils.sleep(3000);
|
||||
}
|
||||
|
||||
job.error = job.error || "";
|
||||
|
|
@ -2488,7 +2580,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
volume_content_source_volume_id +
|
||||
"@" +
|
||||
VOLUME_SOURCE_CLONE_SNAPSHOT_PREFIX +
|
||||
name;
|
||||
volume_id;
|
||||
|
||||
driver.ctx.logger.debug("full snapshot name: %s", fullSnapshotName);
|
||||
|
||||
|
|
@ -2538,7 +2630,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
) {
|
||||
job = await httpApiClient.CoreGetJobs({ id: job_id });
|
||||
job = job[0];
|
||||
await sleep(3000);
|
||||
await GeneralUtils.sleep(3000);
|
||||
}
|
||||
|
||||
job.error = job.error || "";
|
||||
|
|
@ -2626,6 +2718,9 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
volsize: driverZfsResourceType == "volume" ? capacity_bytes : undefined,
|
||||
sparse: driverZfsResourceType == "volume" ? sparse : undefined,
|
||||
create_ancestors: true,
|
||||
share_type: driver.getDriverShareType().includes("smb")
|
||||
? "SMB"
|
||||
: "GENERIC",
|
||||
user_properties: httpApiClient.getPropertiesKeyValueArray(
|
||||
httpApiClient.getUserProperties(volumeProperties)
|
||||
),
|
||||
|
|
@ -2721,7 +2816,18 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
}
|
||||
|
||||
if (setPerms) {
|
||||
await httpApiClient.FilesystemSetperm(perms);
|
||||
response = await httpApiClient.FilesystemSetperm(perms);
|
||||
await httpApiClient.CoreWaitForJob(response, 30);
|
||||
// SetPerm does not alter ownership with extended ACLs
|
||||
// run this in addition just for good measure
|
||||
if (perms.uid || perms.gid) {
|
||||
response = await httpApiClient.FilesystemChown({
|
||||
path: perms.path,
|
||||
uid: perms.uid,
|
||||
gid: perms.gid,
|
||||
});
|
||||
await httpApiClient.CoreWaitForJob(response, 30);
|
||||
}
|
||||
}
|
||||
|
||||
// set acls
|
||||
|
|
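The block above submits a setperm job, waits for it, and then issues an explicit chown when a uid or gid was requested, because setperm alone does not change ownership when extended ACLs are present. A sketch of that call order as a standalone helper; the function name applyPermissions is illustrative, while the API methods are the ones added in this commit:

// assumes `httpApiClient` is an instance of the Api class from
// src/driver/freenas/http/api.js and `perms` looks like
// { path, mode, uid, gid, options: { recursive: true } }
async function applyPermissions(httpApiClient, perms) {
  // FilesystemSetperm returns a TrueNAS job id; wait up to 30 seconds for it
  let job = await httpApiClient.FilesystemSetperm(perms);
  await httpApiClient.CoreWaitForJob(job, 30);

  // setperm alone does not alter ownership with extended ACLs,
  // so chown explicitly whenever a uid/gid was requested
  if (perms.uid || perms.gid) {
    job = await httpApiClient.FilesystemChown({
      path: perms.path,
      uid: perms.uid,
      gid: perms.gid,
    });
    await httpApiClient.CoreWaitForJob(job, 30);
  }
}

module.exports.applyPermissions = applyPermissions;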
@ -2777,7 +2883,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
|
||||
const res = {
|
||||
volume: {
|
||||
volume_id: name,
|
||||
volume_id,
|
||||
//capacity_bytes: capacity_bytes, // kubernetes currently pukes if capacity is returned as 0
|
||||
capacity_bytes:
|
||||
this.options.zfs.datasetEnableQuotas ||
|
||||
|
|
@ -3649,7 +3755,10 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
// so we must be cognizant and use the highest possible value here
|
||||
// note that whatever value is returned here can/will essentially impact the refquota
|
||||
// value of a derived volume
|
||||
size_bytes = getLargestNumber(row.referenced, row.logicalreferenced);
|
||||
size_bytes = GeneralUtils.getLargestNumber(
|
||||
row.referenced,
|
||||
row.logicalreferenced
|
||||
);
|
||||
} else {
|
||||
// get the size of the parent volume
|
||||
size_bytes = row.volsize;
|
||||
|
|
@ -3930,7 +4039,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
while (!job || !["SUCCESS", "ABORTED", "FAILED"].includes(job.state)) {
|
||||
job = await httpApiClient.CoreGetJobs({ id: job_id });
|
||||
job = job[0];
|
||||
await sleep(3000);
|
||||
await GeneralUtils.sleep(3000);
|
||||
}
|
||||
|
||||
job.error = job.error || "";
|
||||
|
|
@ -4045,7 +4154,7 @@ class FreeNASApiDriver extends CsiBaseDriver {
|
|||
// so we must be cognizant and use the highest possible value here
|
||||
// note that whatever value is returned here can/will essentially impact the refquota
|
||||
// value of a derived volume
|
||||
size_bytes = getLargestNumber(
|
||||
size_bytes = GeneralUtils.getLargestNumber(
|
||||
properties.referenced.rawvalue,
|
||||
properties.logicalreferenced.rawvalue
|
||||
// TODO: perhaps include minimum volume size here?
|
||||
|
|
|
|||
|
|
@ -681,7 +681,13 @@ class Api {
|
|||
throw new Error(JSON.stringify(response.body));
|
||||
}
|
||||
|
||||
async CoreWaitForJob(job_id, timeout = 0) {
|
||||
/**
|
||||
*
|
||||
* @param {*} job_id
|
||||
* @param {*} timeout in seconds
|
||||
* @returns
|
||||
*/
|
||||
async CoreWaitForJob(job_id, timeout = 0, check_interval = 3000) {
|
||||
if (!job_id) {
|
||||
throw new Error("invalid job_id");
|
||||
}
|
||||
|
|
@ -692,16 +698,17 @@ class Api {
|
|||
let job;
|
||||
|
||||
// wait for job to finish
|
||||
while (!job || !["SUCCESS", "ABORTED", "FAILED"].includes(job.state)) {
|
||||
do {
|
||||
if (job) {
|
||||
await sleep(check_interval);
|
||||
}
|
||||
job = await this.CoreGetJobs({ id: job_id });
|
||||
job = job[0];
|
||||
await sleep(3000);
|
||||
|
||||
currentTime = Date.now() / 1000;
|
||||
if (timeout > 0 && currentTime > startTime + timeout) {
|
||||
throw new Error("timeout waiting for job to complete");
|
||||
}
|
||||
}
|
||||
} while (!["SUCCESS", "ABORTED", "FAILED"].includes(job.state));
|
||||
|
||||
return job;
|
||||
}
|
||||
|
|
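The rewritten CoreWaitForJob polls CoreGetJobs until the job reaches SUCCESS, ABORTED, or FAILED, sleeping check_interval milliseconds between polls and throwing once the timeout (in seconds) is exceeded. A small usage sketch, assuming `api` is an instance of this Api class and `jobId` came from a prior middleware call:

// wait up to two minutes for a TrueNAS background job, polling every
// 3000 ms (the default check_interval)
async function waitForTrueNASJob(api, jobId) {
  const job = await api.CoreWaitForJob(jobId, 120);

  if (job.state !== "SUCCESS") {
    throw new Error(
      `job ${jobId} ended in state ${job.state}: ${job.error || ""}`
    );
  }
  return job;
}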
@ -754,7 +761,38 @@ class Api {
|
|||
response = await httpClient.post(endpoint, data);
|
||||
|
||||
if (response.statusCode == 200) {
|
||||
return;
|
||||
return response.body;
|
||||
}
|
||||
|
||||
throw new Error(JSON.stringify(response.body));
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {*} data
|
||||
*/
|
||||
async FilesystemChown(data) {
|
||||
/*
|
||||
{
|
||||
"path": "string",
|
||||
"uid": 0,
|
||||
"gid": 0,
|
||||
"options": {
|
||||
"recursive": false,
|
||||
"traverse": false
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
const httpClient = await this.getHttpClient(false);
|
||||
let response;
|
||||
let endpoint;
|
||||
|
||||
endpoint = `/filesystem/chown`;
|
||||
response = await httpClient.post(endpoint, data);
|
||||
|
||||
if (response.statusCode == 200) {
|
||||
return response.body;
|
||||
}
|
||||
|
||||
throw new Error(JSON.stringify(response.body));
|
||||
|
|
|
|||
|
|
@ -86,6 +86,12 @@ class Client {
|
|||
httpAgent: this.getHttpAgent(),
|
||||
httpsAgent: this.getHttpsAgent(),
|
||||
timeout: 60 * 1000,
|
||||
validateStatus: function (status) {
|
||||
if (status >= 500) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
},
|
||||
};
|
||||
|
||||
if (client.options.apiKey) {
|
||||
|
|
@ -122,10 +128,17 @@ class Client {
|
|||
_.set(options, prop, "redacted");
|
||||
}
|
||||
|
||||
delete options.httpAgent;
|
||||
delete options.httpsAgent;
|
||||
|
||||
this.logger.debug("FREENAS HTTP REQUEST: " + stringify(options));
|
||||
this.logger.debug("FREENAS HTTP ERROR: " + error);
|
||||
this.logger.debug("FREENAS HTTP STATUS: " + response.statusCode);
|
||||
this.logger.debug("FREENAS HTTP HEADERS: " + stringify(response.headers));
|
||||
this.logger.debug(
|
||||
"FREENAS HTTP STATUS: " + _.get(response, "statusCode", "")
|
||||
);
|
||||
this.logger.debug(
|
||||
"FREENAS HTTP HEADERS: " + stringify(_.get(response, "headers", ""))
|
||||
);
|
||||
this.logger.debug("FREENAS HTTP BODY: " + stringify(body));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -4,8 +4,9 @@ const { GrpcError, grpc } = require("../../utils/grpc");
|
|||
const registry = require("../../utils/registry");
|
||||
const SshClient = require("../../utils/ssh").SshClient;
|
||||
const HttpClient = require("./http").Client;
|
||||
const TrueNASApiClient = require("./http/api").Api;
|
||||
const { Zetabyte, ZfsSshProcessManager } = require("../../utils/zfs");
|
||||
const { sleep, stringify } = require("../../utils/general");
|
||||
const GeneralUtils = require("../../utils/general");
|
||||
|
||||
const Handlebars = require("handlebars");
|
||||
|
||||
|
|
@ -112,6 +113,13 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
);
|
||||
}
|
||||
|
||||
async getTrueNASHttpApiClient() {
|
||||
return registry.getAsync(`${__REGISTRY_NS__}:api_client`, async () => {
|
||||
const httpClient = await this.getHttpClient();
|
||||
return new TrueNASApiClient(httpClient, this.ctx.cache);
|
||||
});
|
||||
}
|
||||
|
||||
getDriverShareType() {
|
||||
switch (this.options.driver) {
|
||||
case "freenas-nfs":
|
||||
|
|
@ -300,7 +308,27 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
break;
|
||||
}
|
||||
|
||||
response = await httpClient.post("/sharing/nfs", share);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.post("/sharing/nfs", share);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
/**
|
||||
* v1 = 201
|
||||
|
|
@ -521,7 +549,27 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
break;
|
||||
}
|
||||
|
||||
response = await httpClient.post(endpoint, share);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.post(endpoint, share);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
/**
|
||||
* v1 = 201
|
||||
|
|
@ -1402,7 +1450,27 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
});
|
||||
|
||||
if (deleteAsset) {
|
||||
response = await httpClient.delete(endpoint);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.delete(endpoint);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
// returns a 500 if does not exist
|
||||
// v1 = 204
|
||||
|
|
@ -1484,12 +1552,35 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
});
|
||||
|
||||
if (deleteAsset) {
|
||||
response = await httpClient.delete(endpoint);
|
||||
response = await GeneralUtils.retry(
|
||||
3,
|
||||
1000,
|
||||
async () => {
|
||||
return await httpClient.delete(endpoint);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "ECONNRESET") {
|
||||
return true;
|
||||
}
|
||||
if (err.code == "ECONNABORTED") {
|
||||
return true;
|
||||
}
|
||||
if (err.response && err.response.statusCode == 504) {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
|
||||
// returns a 500 if does not exist
|
||||
// v1 = 204
|
||||
// v2 = 200
|
||||
if (![200, 204].includes(response.statusCode)) {
|
||||
if (
|
||||
![200, 204].includes(response.statusCode) &&
|
||||
!JSON.stringify(response.body).includes("does not exist")
|
||||
) {
|
||||
throw new GrpcError(
|
||||
grpc.status.UNKNOWN,
|
||||
`received error deleting smb share - share: ${shareId} code: ${
|
||||
|
|
@ -1606,7 +1697,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
targetId,
|
||||
retries
|
||||
);
|
||||
await sleep(retryWait);
|
||||
await GeneralUtils.sleep(retryWait);
|
||||
response = await httpClient.delete(endpoint);
|
||||
}
|
||||
|
||||
|
|
@ -1716,6 +1807,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
async setFilesystemMode(path, mode) {
|
||||
const httpClient = await this.getHttpClient();
|
||||
const apiVersion = httpClient.getApiVersion();
|
||||
const httpApiClient = await this.getTrueNASHttpApiClient();
|
||||
|
||||
switch (apiVersion) {
|
||||
case 1:
|
||||
|
|
@ -1747,6 +1839,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
response = await httpClient.post(endpoint, perms);
|
||||
|
||||
if (response.statusCode == 200) {
|
||||
await httpApiClient.CoreWaitForJob(response.body, 30);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -1764,6 +1857,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
async setFilesystemOwnership(path, user = false, group = false) {
|
||||
const httpClient = await this.getHttpClient();
|
||||
const apiVersion = httpClient.getApiVersion();
|
||||
const httpApiClient = await this.getTrueNASHttpApiClient();
|
||||
|
||||
if (user === false || typeof user == "undefined" || user === null) {
|
||||
user = "";
|
||||
|
|
@ -1832,6 +1926,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
response = await httpClient.post(endpoint, perms);
|
||||
|
||||
if (response.statusCode == 200) {
|
||||
await httpApiClient.CoreWaitForJob(response.body, 30);
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -2122,7 +2217,7 @@ class FreeNASSshDriver extends ControllerZfsBaseDriver {
|
|||
// likely bad creds/url
|
||||
throw new GrpcError(
|
||||
grpc.status.UNKNOWN,
|
||||
`FreeNAS error getting system version info: ${stringify({
|
||||
`FreeNAS error getting system version info: ${GeneralUtils.stringify({
|
||||
errors: versionErrors,
|
||||
responses: versionResponses,
|
||||
})}`
|
||||
|
|
|
|||
src/driver/index.js: 3584 lines changed (diff suppressed because it is too large)
|
|
@ -0,0 +1,269 @@
|
|||
const _ = require("lodash");
|
||||
const grpc = require("./grpc").grpc;
|
||||
const path = require("path");
|
||||
const protoLoader = require("@grpc/proto-loader");
|
||||
|
||||
const PROTO_BASE_PATH =
|
||||
path.dirname(path.dirname(__dirname)) + path.sep + "csi_proxy_proto";
|
||||
|
||||
/**
|
||||
* leave connection null as by default the named pipe is derived
|
||||
*/
|
||||
const DEFAULT_SERVICES = {
|
||||
filesystem: { version: "v1", connection: null },
|
||||
disk: { version: "v1", connection: null },
|
||||
volume: { version: "v1", connection: null },
|
||||
smb: { version: "v1", connection: null },
|
||||
system: { version: "v1alpha1", connection: null },
|
||||
iscsi: { version: "v1alpha2", connection: null },
|
||||
};
|
||||
|
||||
function capitalize(s) {
|
||||
return s && s[0].toUpperCase() + s.slice(1);
|
||||
}
|
||||
|
||||
class CsiProxyClient {
|
||||
constructor(options = {}) {
|
||||
this.clients = {};
|
||||
|
||||
// initialize all clients
|
||||
const services = Object.assign(
|
||||
{},
|
||||
DEFAULT_SERVICES,
|
||||
options.services || {}
|
||||
);
|
||||
|
||||
const pipePrefix = options.pipe_prefix || "csi-proxy";
|
||||
|
||||
for (const serviceName in services) {
|
||||
const service = services[serviceName];
|
||||
const serviceVersion =
|
||||
service.version || DEFAULT_SERVICES[serviceName].version;
|
||||
const serviceConnection =
|
||||
// HANGS
|
||||
// Http2Session client (38) nghttp2 has 13 bytes to send directly
|
||||
// Http2Session client (38) wants read? 1
|
||||
// Then pipe closes after 60 seconds-ish
|
||||
service.connection ||
|
||||
`unix:////./pipe/${pipePrefix}-${serviceName}-${serviceVersion}`;
|
||||
// EACCESS
|
||||
//service.connection ||
|
||||
//`unix:///csi/${pipePrefix}-${serviceName}-${serviceVersion}`;
|
||||
//service.connection ||
|
||||
//`unix:///csi/csi.sock.internal`;
|
||||
|
||||
const PROTO_PATH = `${PROTO_BASE_PATH}\\${serviceName}\\${serviceVersion}\\api.proto`;
|
||||
|
||||
const packageDefinition = protoLoader.loadSync(PROTO_PATH, {
|
||||
keepCase: true,
|
||||
longs: String,
|
||||
enums: String,
|
||||
defaults: true,
|
||||
oneofs: true,
|
||||
includeDirs: [__dirname + "/../csi_proxy_proto"],
|
||||
});
|
||||
const protoDescriptor = grpc.loadPackageDefinition(packageDefinition);
|
||||
const serviceInstance = new protoDescriptor[serviceVersion][
|
||||
capitalize(serviceName)
|
||||
](serviceConnection, grpc.credentials.createInsecure());
|
||||
this.clients[serviceName] = serviceInstance;
|
||||
}
|
||||
}
|
||||
|
||||
async executeRPC(serviceName, methodName, options = {}) {
|
||||
function rescursivePathFixer(obj) {
|
||||
for (const k in obj) {
|
||||
if (typeof obj[k] == "object" && obj[k] !== null) {
|
||||
rescursivePathFixer(obj[k]);
|
||||
} else {
|
||||
if (k.includes("path")) {
|
||||
obj[k] = obj[k].replaceAll("/", "\\");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rescursivePathFixer(options);
|
||||
|
||||
const cleansedOptions = JSON.parse(JSON.stringify(options));
|
||||
// This function handles arrays and objects
|
||||
function recursiveCleanse(obj) {
|
||||
for (const k in obj) {
|
||||
if (typeof obj[k] == "object" && obj[k] !== null) {
|
||||
recursiveCleanse(obj[k]);
|
||||
} else {
|
||||
if (
|
||||
k.includes("secret") ||
|
||||
k.includes("username") ||
|
||||
k.includes("password")
|
||||
) {
|
||||
obj[k] = "redacted";
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
recursiveCleanse(cleansedOptions);
|
||||
|
||||
console.log(
|
||||
"csi-proxy request %s/%s - data: %j",
|
||||
capitalize(serviceName),
|
||||
methodName,
|
||||
cleansedOptions
|
||||
);
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const functionRef = this.clients[serviceName.toLowerCase()][methodName];
|
||||
if (!functionRef) {
|
||||
reject(
|
||||
new Error(
|
||||
`missing method ${methodName} on service ${capitalize(serviceName)}`
|
||||
)
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
this.clients[serviceName.toLowerCase()][methodName](
|
||||
options,
|
||||
(error, data) => {
|
||||
console.log(
|
||||
"csi-proxy response %s/%s - error: %j, data: %j",
|
||||
capitalize(serviceName),
|
||||
methodName,
|
||||
error,
|
||||
data
|
||||
);
|
||||
|
||||
if (error) {
|
||||
reject(error);
|
||||
}
|
||||
|
||||
resolve(data);
|
||||
}
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a disk_number if the target has 0 or 1 disks
|
||||
*
|
||||
* @param {*} target_portal
|
||||
* @param {*} iqn
|
||||
* @returns
|
||||
*/
|
||||
async getDiskNumberFromIscsiTarget(target_portal, iqn) {
|
||||
let result;
|
||||
|
||||
if (typeof target_portal != "object") {
|
||||
target_portal = {
|
||||
target_address: target_portal.split(":")[0],
|
||||
target_port: target_portal.split(":")[1] || 3260,
|
||||
};
|
||||
}
|
||||
|
||||
// get device
|
||||
try {
|
||||
result = await this.executeRPC("iscsi", "GetTargetDisks", {
|
||||
target_portal,
|
||||
iqn,
|
||||
});
|
||||
} catch (e) {
|
||||
let details = _.get(e, "details", "");
|
||||
if (!details.includes("ObjectNotFound")) {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
let diskIds = _.get(result, "diskIDs", []);
|
||||
if (diskIds.length > 1) {
|
||||
throw new Error(
|
||||
`${diskIds.length} disks on the target, no way to know which is the relevant disk`
|
||||
);
|
||||
}
|
||||
|
||||
return diskIds[0];
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a volume_id if the disk has 0 or 1 volumes
|
||||
*
|
||||
* @param {*} disk_number
|
||||
* @returns
|
||||
*/
|
||||
async getVolumeIdFromDiskNumber(disk_number) {
|
||||
let result;
|
||||
|
||||
if (disk_number == 0 || disk_number > 0) {
|
||||
result = await this.executeRPC("volume", "ListVolumesOnDisk", {
|
||||
disk_number,
|
||||
});
|
||||
|
||||
let volume_ids = _.get(result, "volume_ids", []);
|
||||
/**
|
||||
* the 1st partition is a sort of system partition and is ""
|
||||
* usually around 15MB in size
|
||||
*/
|
||||
volume_ids = volume_ids.filter((item) => {
|
||||
return Boolean(item);
|
||||
});
|
||||
|
||||
if (volume_ids.length > 1) {
|
||||
throw new Error(
|
||||
`${volume_ids.length} volumes on the disk, no way to know which is the relevant volume`
|
||||
);
|
||||
}
|
||||
|
||||
// ok if null/undefined
|
||||
return volume_ids[0];
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Return a volume_id if the target and disk both have 0 or 1 entries
|
||||
*
|
||||
* @param {*} target_portal
|
||||
* @param {*} iqn
|
||||
* @returns
|
||||
*/
|
||||
async getVolumeIdFromIscsiTarget(target_portal, iqn) {
|
||||
const disk_number = await this.getDiskNumberFromIscsiTarget(...arguments);
|
||||
return await this.getVolumeIdFromDiskNumber(disk_number);
|
||||
}
|
||||
|
||||
async FilesystemPathExists(path) {
|
||||
let result;
|
||||
try {
|
||||
result = await this.executeRPC("filesystem", "PathExists", {
|
||||
path,
|
||||
});
|
||||
|
||||
return result.exists;
|
||||
} catch (e) {
|
||||
let details = _.get(e, "details", "");
|
||||
if (details.includes("not an absolute Windows path")) {
|
||||
return false;
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async FilesystemIsSymlink(path) {
|
||||
let result;
|
||||
try {
|
||||
result = await this.executeRPC("filesystem", "IsSymlink", {
|
||||
path,
|
||||
});
|
||||
|
||||
return result.is_symlink;
|
||||
} catch (e) {
|
||||
let details = _.get(e, "details", "");
|
||||
if (details.includes("not an absolute Windows path")) {
|
||||
return false;
|
||||
} else {
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
module.exports.CsiProxyClient = CsiProxyClient;
|
||||
|
|
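The new csi-proxy client wraps one gRPC stub per csi-proxy service and exposes a generic executeRPC plus a few convenience helpers. A usage sketch; the require path, portal address, and IQN are examples, not values from the source:

const { CsiProxyClient } = require("./src/utils/csi_proxy_client"); // path assumed

async function main() {
  // connects to the default named pipes, e.g. \\.\pipe\csi-proxy-volume-v1
  const csiProxy = new CsiProxyClient();

  // resolve the Windows volume backing an iSCSI target
  const volumeId = await csiProxy.getVolumeIdFromIscsiTarget(
    "192.168.0.10:3260", // example portal, split into address/port internally
    "iqn.2005-10.org.freenas.ctl:example" // example IQN
  );
  console.log("volume id:", volumeId);

  // generic form: service name, method name, request message
  const result = await csiProxy.executeRPC("filesystem", "PathExists", {
    path: "C:\\var\\lib\\kubelet",
  });
  console.log("exists:", result.exists);
}

main();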
@ -1,5 +1,7 @@
|
|||
const cp = require("child_process");
|
||||
const fs = require("fs");
|
||||
const GeneralUtils = require("./general");
|
||||
const path = require("path");
|
||||
|
||||
const DEFAULT_TIMEOUT = process.env.FILESYSTEM_DEFAULT_TIMEOUT || 30000;
|
||||
|
||||
|
|
@ -25,6 +27,10 @@ class Filesystem {
|
|||
}
|
||||
}
|
||||
|
||||
covertUnixSeparatorToWindowsSeparator(p) {
|
||||
return p.replaceAll(path.posix.sep, path.win32.sep);
|
||||
}
|
||||
|
||||
/**
|
||||
* Attempt to discover if device is a block device
|
||||
*
|
||||
|
|
@ -223,8 +229,12 @@ class Filesystem {
|
|||
}
|
||||
}
|
||||
|
||||
async isSymbolicLink(path) {
|
||||
return fs.lstatSync(path).isSymbolicLink();
|
||||
}
|
||||
|
||||
/**
|
||||
* create symlink
|
||||
* remove file
|
||||
*
|
||||
* @param {*} device
|
||||
*/
|
||||
|
|
@ -298,7 +308,7 @@ class Filesystem {
|
|||
async getBlockDevice(device) {
|
||||
const filesystem = this;
|
||||
device = await filesystem.realpath(device);
|
||||
let args = ["-a", "-b", "-l", "-J", "-O"];
|
||||
let args = ["-a", "-b", "-J", "-O"];
|
||||
args.push(device);
|
||||
let result;
|
||||
|
||||
|
|
@ -312,30 +322,214 @@ class Filesystem {
|
|||
}
|
||||
|
||||
/**
|
||||
* blkid -p -o export <device>
|
||||
*
|
||||
* @param {*} device
|
||||
* @returns
|
||||
*/
|
||||
async getBlockDeviceLargestPartition(device) {
|
||||
const filesystem = this;
|
||||
let block_device_info = await filesystem.getBlockDevice(device);
|
||||
if (block_device_info.children) {
|
||||
let child;
|
||||
for (const child_i of block_device_info.children) {
|
||||
if (child_i.type == "part") {
|
||||
if (!child) {
|
||||
child = child_i;
|
||||
} else {
|
||||
if (child_i.size > child.size) {
|
||||
child = child_i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return `${child.path}`;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {*} device
|
||||
* @returns
|
||||
*/
|
||||
async getBlockDeviceLastPartition(device) {
|
||||
const filesystem = this;
|
||||
let block_device_info = await filesystem.getBlockDevice(device);
|
||||
if (block_device_info.children) {
|
||||
let child;
|
||||
for (const child_i of block_device_info.children) {
|
||||
if (child_i.type == "part") {
|
||||
if (!child) {
|
||||
child = child_i;
|
||||
} else {
|
||||
let minor = child["maj:min"].split(":")[1];
|
||||
let minor_i = child_i["maj:min"].split(":")[1];
|
||||
if (minor_i > minor) {
|
||||
child = child_i;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return `${child.path}`;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {*} device
|
||||
* @returns
|
||||
*/
|
||||
async getBlockDevicePartitionCount(device) {
|
||||
const filesystem = this;
|
||||
let count = 0;
|
||||
let block_device_info = await filesystem.getBlockDevice(device);
|
||||
if (block_device_info.children) {
|
||||
for (const child_i of block_device_info.children) {
|
||||
if (child_i.type == "part") {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
}
|
||||
return count;
|
||||
}
|
||||
|
||||
async getBlockDeviceHasParitionTable(device) {
|
||||
const filesystem = this;
|
||||
let block_device_info = await filesystem.getBlockDevice(device);
|
||||
|
||||
return block_device_info.pttype ? true : false;
|
||||
}
|
||||
|
||||
/**
|
||||
* DOS
|
||||
* - type=83 = Linux
|
||||
* - type=07 = HPFS/NTFS/exFAT
|
||||
*
|
||||
* GPT
|
||||
* - type=0FC63DAF-8483-4772-8E79-3D69D8477DE4 = linux
|
||||
* - type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7 = ntfs
|
||||
* - type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B = EFI
|
||||
*
|
||||
* @param {*} device
|
||||
* @param {*} label
|
||||
* @param {*} type
|
||||
*/
|
||||
async partitionDevice(
|
||||
device,
|
||||
label = "gpt",
|
||||
type = "0FC63DAF-8483-4772-8E79-3D69D8477DE4"
|
||||
) {
|
||||
const filesystem = this;
|
||||
let args = [device];
|
||||
let result;
|
||||
|
||||
try {
|
||||
result = await filesystem.exec("sfdisk", args, {
|
||||
stdin: `label: ${label}\n`,
|
||||
});
|
||||
result = await filesystem.exec("sfdisk", args, {
|
||||
stdin: `type=${type}\n`,
|
||||
});
|
||||
} catch (err) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* mimic the behavior of partitioning a new data drive in windows directly
|
||||
*
|
||||
* https://en.wikipedia.org/wiki/Microsoft_Reserved_Partition
|
||||
*
|
||||
* @param {*} device
|
||||
*/
|
||||
async partitionDeviceWindows(device) {
|
||||
const filesystem = this;
|
||||
let args = [device];
|
||||
let result;
|
||||
let block_device_info = await filesystem.getBlockDevice(device);
|
||||
|
||||
//let sixteen_megabytes = 16777216;
|
||||
//let thirtytwo_megabytes = 33554432;
|
||||
//let onehundredtwentyeight_megabytes = 134217728;
|
||||
|
||||
let msr_partition_size = "16M";
|
||||
let label = "gpt";
|
||||
let msr_guid = "E3C9E316-0B5C-4DB8-817D-F92DF00215AE";
|
||||
let ntfs_guid = "EBD0A0A2-B9E5-4433-87C0-68B6B72699C7";
|
||||
|
||||
if (block_device_info.type != "disk") {
|
||||
throw new Error(
|
||||
`cannot partition device of type: ${block_device_info.type}`
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* On drives less than 16GB in size, the MSR is 32MB.
|
||||
* On drives greater than or equal to 16GB, the MSR is 128 MB.
|
||||
* That 128 MB / 32 MB sizing applies to Windows 7/8; Windows 10 always uses a 16 MB MSR.
|
||||
*/
|
||||
let msr_partition_size_break = 17179869184; // 16GB
|
||||
|
||||
// TODO: this size may be sectors so not really disk size in terms of GB
|
||||
if (block_device_info.size >= msr_partition_size_break) {
|
||||
// ignoring for now, appears windows 10+ use 16MB always
|
||||
//msr_partition_size = "128M";
|
||||
}
|
||||
|
||||
try {
|
||||
result = await filesystem.exec("sfdisk", args, {
|
||||
stdin: `label: ${label}\n`,
|
||||
});
|
||||
// must send ALL partitions at once (newline separated), cannot send them 1 at a time
|
||||
result = await filesystem.exec("sfdisk", args, {
|
||||
stdin: `size=${msr_partition_size},type=${msr_guid}\ntype=${ntfs_guid}\n`,
|
||||
});
|
||||
} catch (err) {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
*
|
||||
* @param {*} device
|
||||
*/
|
||||
async deviceIsFormatted(device) {
|
||||
const filesystem = this;
|
||||
let args = ["-p", "-o", "export", device];
|
||||
let result;
|
||||
|
||||
try {
|
||||
result = await filesystem.exec("blkid", args);
|
||||
result = await filesystem.getBlockDevice(device);
|
||||
return result.fstype ? true : false;
|
||||
} catch (err) {
|
||||
if (err.code == 2 && err.stderr.includes("No such device or address")) {
|
||||
throw err;
|
||||
}
|
||||
|
||||
if (err.code == 2) {
|
||||
return false;
|
||||
}
|
||||
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
async deviceIsIscsi(device) {
|
||||
const filesystem = this;
|
||||
let result;
|
||||
|
||||
do {
|
||||
if (result) {
|
||||
device = `/dev/${result.pkname}`;
|
||||
}
|
||||
result = await filesystem.getBlockDevice(device);
|
||||
} while (result.pkname);
|
||||
|
||||
return result && result.tran == "iscsi";
|
||||
}
|
||||
|
||||
async getBlockDeviceParent(device) {
|
||||
const filesystem = this;
|
||||
let result;
|
||||
|
||||
do {
|
||||
if (result) {
|
||||
device = `/dev/${result.pkname}`;
|
||||
}
|
||||
result = await filesystem.getBlockDevice(device);
|
||||
} while (result.pkname);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -438,6 +632,31 @@ class Filesystem {
|
|||
}
|
||||
}
|
||||
|
||||
async expandPartition(device) {
|
||||
const filesystem = this;
|
||||
const command = "growpart";
|
||||
const args = [];
|
||||
|
||||
let block_device_info = await filesystem.getBlockDevice(device);
|
||||
let device_fs_info = await filesystem.getDeviceFilesystemInfo(device);
|
||||
let growpart_partition = device_fs_info["part_entry_number"];
|
||||
let parent_block_device = await filesystem.getBlockDeviceParent(device);
|
||||
|
||||
args.push(parent_block_device.path, growpart_partition);
|
||||
|
||||
try {
|
||||
await filesystem.exec(command, args);
|
||||
} catch (err) {
|
||||
if (
|
||||
err.code == 1 &&
|
||||
err.stdout &&
|
||||
err.stdout.includes("could only be grown by")
|
||||
) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* expand a given filesystem
|
||||
*
|
||||
|
|
@ -458,6 +677,9 @@ class Filesystem {
|
|||
args = args.concat(["filesystem", "resize", "max"]);
|
||||
args.push(device); // in this case should be a mounted path
|
||||
break;
|
||||
case "exfat":
|
||||
// https://github.com/exfatprogs/exfatprogs/issues/134
|
||||
return;
|
||||
case "ext4":
|
||||
case "ext3":
|
||||
case "ext4dev":
|
||||
|
|
@ -465,6 +687,16 @@ class Filesystem {
|
|||
args = args.concat(options);
|
||||
args.push(device);
|
||||
break;
|
||||
case "ntfs":
|
||||
// must be unmounted
|
||||
command = "ntfsresize";
|
||||
await filesystem.exec(command, ["-c", device]);
|
||||
await filesystem.exec(command, ["-n", device]);
|
||||
args = args.concat("-P", "-f");
|
||||
args = args.concat(options);
|
||||
//args = args.concat(["-s", "max"]);
|
||||
args.push(device);
|
||||
break;
|
||||
case "xfs":
|
||||
command = "xfs_growfs";
|
||||
args = args.concat(options);
|
||||
|
|
@ -481,6 +713,10 @@ class Filesystem {
|
|||
|
||||
try {
|
||||
result = await filesystem.exec(command, args);
|
||||
// must clear the dirty bit after resize
|
||||
if (fstype.toLowerCase() == "ntfs") {
|
||||
await filesystem.exec("ntfsfix", ["-d", device]);
|
||||
}
|
||||
return result;
|
||||
} catch (err) {
|
||||
throw err;
|
||||
|
|
@ -521,6 +757,15 @@ class Filesystem {
|
|||
args.push("-f");
|
||||
args.push("-p");
|
||||
break;
|
||||
case "ntfs":
|
||||
/**
|
||||
* -b, --clear-bad-sectors Clear the bad sector list
|
||||
* -d, --clear-dirty Clear the volume dirty flag
|
||||
*/
|
||||
command = "ntfsfix";
|
||||
args.push("-d");
|
||||
args.push(device);
|
||||
break;
|
||||
case "xfs":
|
||||
command = "xfs_repair";
|
||||
args = args.concat(["-o", "force_geometry"]);
|
||||
|
|
@ -589,16 +834,31 @@ class Filesystem {
|
|||
* @param {*} path
|
||||
*/
|
||||
async pathExists(path) {
|
||||
const filesystem = this;
|
||||
let args = [];
|
||||
args.push(path);
|
||||
|
||||
let result = false;
|
||||
try {
|
||||
await filesystem.exec("stat", args);
|
||||
await GeneralUtils.retry(
|
||||
10,
|
||||
200,
|
||||
() => {
|
||||
fs.statSync(path);
|
||||
},
|
||||
{
|
||||
retryCondition: (err) => {
|
||||
if (err.code == "UNKNOWN") {
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
},
|
||||
}
|
||||
);
|
||||
result = true;
|
||||
} catch (err) {
|
||||
return false;
|
||||
if (err.code !== "ENOENT") {
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
exec(command, args, options = {}) {
|
||||
|
|
@ -607,6 +867,12 @@ class Filesystem {
|
|||
//options.timeout = DEFAULT_TIMEOUT;
|
||||
}
|
||||
|
||||
let stdin;
|
||||
if (options.stdin) {
|
||||
stdin = options.stdin;
|
||||
delete options.stdin;
|
||||
}
|
||||
|
||||
const filesystem = this;
|
||||
args = args || [];
|
||||
|
||||
|
|
@ -614,13 +880,27 @@ class Filesystem {
|
|||
args.unshift(command);
|
||||
command = filesystem.options.paths.sudo;
|
||||
}
|
||||
console.log("executing filesystem command: %s %s", command, args.join(" "));
|
||||
|
||||
let command_log = `${command} ${args.join(" ")}`.trim();
|
||||
if (stdin) {
|
||||
command_log = `echo '${stdin}' | ${command_log}`
|
||||
.trim()
|
||||
.replace(/\n/, "\\n");
|
||||
}
|
||||
console.log("executing filesystem command: %s", command_log);
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const child = filesystem.options.executor.spawn(command, args, options);
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
|
||||
|
||||
child.on("spawn", function () {
|
||||
if (stdin) {
|
||||
child.stdin.setEncoding("utf-8");
|
||||
child.stdin.write(stdin);
|
||||
child.stdin.end();
|
||||
}
|
||||
});
|
||||
|
||||
child.stdout.on("data", function (data) {
|
||||
stdout = stdout + data;
|
||||
});
|
||||
|
|
|
|||
|
|
@ -1,4 +1,6 @@
|
|||
const _ = require("lodash");
|
||||
const axios = require("axios");
|
||||
const crypto = require("crypto");
|
||||
|
||||
function sleep(ms) {
|
||||
return new Promise((resolve) => {
|
||||
|
|
@ -6,6 +8,64 @@ function sleep(ms) {
|
|||
});
|
||||
}
|
||||
|
||||
function md5(val) {
|
||||
return crypto.createHash("md5").update(val).digest("hex");
|
||||
}
|
||||
|
||||
function crc32(val) {
|
||||
for (var a, o = [], c = 0; c < 256; c++) {
|
||||
a = c;
|
||||
for (var f = 0; f < 8; f++) a = 1 & a ? 3988292384 ^ (a >>> 1) : a >>> 1;
|
||||
o[c] = a;
|
||||
}
|
||||
for (var n = -1, t = 0; t < val.length; t++)
|
||||
n = (n >>> 8) ^ o[255 & (n ^ val.charCodeAt(t))];
|
||||
return (-1 ^ n) >>> 0;
|
||||
}
|
||||
|
||||
const crctab16 = new Uint16Array([
|
||||
0x0000, 0x1189, 0x2312, 0x329b, 0x4624, 0x57ad, 0x6536, 0x74bf, 0x8c48,
|
||||
0x9dc1, 0xaf5a, 0xbed3, 0xca6c, 0xdbe5, 0xe97e, 0xf8f7, 0x1081, 0x0108,
|
||||
0x3393, 0x221a, 0x56a5, 0x472c, 0x75b7, 0x643e, 0x9cc9, 0x8d40, 0xbfdb,
|
||||
0xae52, 0xdaed, 0xcb64, 0xf9ff, 0xe876, 0x2102, 0x308b, 0x0210, 0x1399,
|
||||
0x6726, 0x76af, 0x4434, 0x55bd, 0xad4a, 0xbcc3, 0x8e58, 0x9fd1, 0xeb6e,
|
||||
0xfae7, 0xc87c, 0xd9f5, 0x3183, 0x200a, 0x1291, 0x0318, 0x77a7, 0x662e,
|
||||
0x54b5, 0x453c, 0xbdcb, 0xac42, 0x9ed9, 0x8f50, 0xfbef, 0xea66, 0xd8fd,
|
||||
0xc974, 0x4204, 0x538d, 0x6116, 0x709f, 0x0420, 0x15a9, 0x2732, 0x36bb,
|
||||
0xce4c, 0xdfc5, 0xed5e, 0xfcd7, 0x8868, 0x99e1, 0xab7a, 0xbaf3, 0x5285,
|
||||
0x430c, 0x7197, 0x601e, 0x14a1, 0x0528, 0x37b3, 0x263a, 0xdecd, 0xcf44,
|
||||
0xfddf, 0xec56, 0x98e9, 0x8960, 0xbbfb, 0xaa72, 0x6306, 0x728f, 0x4014,
|
||||
0x519d, 0x2522, 0x34ab, 0x0630, 0x17b9, 0xef4e, 0xfec7, 0xcc5c, 0xddd5,
|
||||
0xa96a, 0xb8e3, 0x8a78, 0x9bf1, 0x7387, 0x620e, 0x5095, 0x411c, 0x35a3,
|
||||
0x242a, 0x16b1, 0x0738, 0xffcf, 0xee46, 0xdcdd, 0xcd54, 0xb9eb, 0xa862,
|
||||
0x9af9, 0x8b70, 0x8408, 0x9581, 0xa71a, 0xb693, 0xc22c, 0xd3a5, 0xe13e,
|
||||
0xf0b7, 0x0840, 0x19c9, 0x2b52, 0x3adb, 0x4e64, 0x5fed, 0x6d76, 0x7cff,
|
||||
0x9489, 0x8500, 0xb79b, 0xa612, 0xd2ad, 0xc324, 0xf1bf, 0xe036, 0x18c1,
|
||||
0x0948, 0x3bd3, 0x2a5a, 0x5ee5, 0x4f6c, 0x7df7, 0x6c7e, 0xa50a, 0xb483,
|
||||
0x8618, 0x9791, 0xe32e, 0xf2a7, 0xc03c, 0xd1b5, 0x2942, 0x38cb, 0x0a50,
|
||||
0x1bd9, 0x6f66, 0x7eef, 0x4c74, 0x5dfd, 0xb58b, 0xa402, 0x9699, 0x8710,
|
||||
0xf3af, 0xe226, 0xd0bd, 0xc134, 0x39c3, 0x284a, 0x1ad1, 0x0b58, 0x7fe7,
|
||||
0x6e6e, 0x5cf5, 0x4d7c, 0xc60c, 0xd785, 0xe51e, 0xf497, 0x8028, 0x91a1,
|
||||
0xa33a, 0xb2b3, 0x4a44, 0x5bcd, 0x6956, 0x78df, 0x0c60, 0x1de9, 0x2f72,
|
||||
0x3efb, 0xd68d, 0xc704, 0xf59f, 0xe416, 0x90a9, 0x8120, 0xb3bb, 0xa232,
|
||||
0x5ac5, 0x4b4c, 0x79d7, 0x685e, 0x1ce1, 0x0d68, 0x3ff3, 0x2e7a, 0xe70e,
|
||||
0xf687, 0xc41c, 0xd595, 0xa12a, 0xb0a3, 0x8238, 0x93b1, 0x6b46, 0x7acf,
|
||||
0x4854, 0x59dd, 0x2d62, 0x3ceb, 0x0e70, 0x1ff9, 0xf78f, 0xe606, 0xd49d,
|
||||
0xc514, 0xb1ab, 0xa022, 0x92b9, 0x8330, 0x7bc7, 0x6a4e, 0x58d5, 0x495c,
|
||||
0x3de3, 0x2c6a, 0x1ef1, 0x0f78,
|
||||
]);
|
||||
|
||||
// calculate the 16-bit CRC of data with predetermined length.
|
||||
function crc16(data) {
|
||||
var res = 0x0ffff;
|
||||
|
||||
for (let b of data) {
|
||||
res = ((res >> 8) & 0x0ff) ^ crctab16[(res ^ b) & 0xff];
|
||||
}
|
||||
|
||||
return ~res & 0x0ffff;
|
||||
}
|
||||
|
||||
function lockKeysFromRequest(call, serviceMethodName) {
|
||||
switch (serviceMethodName) {
|
||||
// controller
|
||||
|
|
@ -53,11 +113,19 @@ function getLargestNumber() {
|
|||
return number;
|
||||
}
|
||||
|
||||
function stripWindowsDriveLetter(path) {
|
||||
return path.replace(/^[a-zA-Z]:/, "");
|
||||
}
|
||||
|
||||
function hasWindowsDriveLetter(path) {
|
||||
return /^[a-zA-Z]:/i.test(path);
|
||||
}
|
||||
|
||||
/**
|
||||
* transition function to replicate `request` style requests using axios
|
||||
*
|
||||
* @param {*} options
|
||||
* @param {*} callback
|
||||
*
|
||||
* @param {*} options
|
||||
* @param {*} callback
|
||||
*/
|
||||
function axios_request(options, callback = function () {}) {
|
||||
function prep_response(res) {
|
||||
|
|
@ -80,7 +148,14 @@ function axios_request(options, callback = function () {}) {
|
|||
// The request was made and the server responded with a status code
|
||||
// that falls out of the range of 2xx
|
||||
let res = prep_response(err.response);
|
||||
callback(null, res, res.body);
|
||||
let senderr = false;
|
||||
if (
|
||||
options.validateStatus &&
|
||||
typeof options.validateStatus == "function"
|
||||
) {
|
||||
senderr = true;
|
||||
}
|
||||
callback(senderr ? err : null, res, res.body);
|
||||
} else if (err.request) {
|
||||
// The request was made but no response was received
|
||||
// `error.request` is an instance of XMLHttpRequest in the browser and an instance of
|
||||
|
|
@ -110,8 +185,82 @@ function stringify(value) {
|
|||
return JSON.stringify(value, getCircularReplacer());
|
||||
}
|
||||
|
||||
function default_supported_block_filesystems() {
|
||||
return ["btrfs", "exfat", "ext3", "ext4", "ext4dev", "ntfs", "vfat", "xfs"];
|
||||
}
|
||||
|
||||
function default_supported_file_filesystems() {
|
||||
return ["nfs", "cifs"];
|
||||
}
|
||||
|
||||
async function retry(retries, retriesDelay, code, options = {}) {
|
||||
let current_try = 0;
|
||||
let maxwait = _.get(options, "maxwait");
|
||||
let logerrors = _.get(options, "logerrors", false);
|
||||
let retryCondition = options.retryCondition;
|
||||
let executeStartTime;
|
||||
|
||||
do {
|
||||
current_try++;
|
||||
try {
|
||||
executeStartTime = Date.now();
|
||||
return await code();
|
||||
} catch (err) {
|
||||
if (current_try >= retries) {
|
||||
throw err;
|
||||
}
|
||||
if (retryCondition) {
|
||||
let retry = retryCondition(err);
|
||||
if (!retry) {
|
||||
console.log(`retry - failed condition, not trying again`);
|
||||
throw err;
|
||||
}
|
||||
}
|
||||
if (logerrors === true) {
|
||||
console.log(`retry - err:`, err);
|
||||
}
|
||||
}
|
||||
|
||||
// handle minExecutionTime
|
||||
if (options.minExecutionTime > 0) {
|
||||
let executionElapsedTime = Date.now() - executeStartTime;
|
||||
let minExecutionDelayTime =
|
||||
options.minExecutionTime - executionElapsedTime;
|
||||
if (minExecutionDelayTime > 0) {
|
||||
await sleep(minExecutionDelayTime);
|
||||
}
|
||||
}
|
||||
|
||||
// handle delay
|
||||
let sleep_time = retriesDelay;
|
||||
if (_.get(options, "exponential", false) === true) {
|
||||
sleep_time = retriesDelay * current_try;
|
||||
}
|
||||
|
||||
if (maxwait) {
|
||||
if (sleep_time > maxwait) {
|
||||
sleep_time = maxwait;
|
||||
}
|
||||
}
|
||||
if (sleep_time > 0) {
|
||||
console.log(`retry - waiting ${sleep_time}ms before trying again`);
|
||||
await sleep(sleep_time);
|
||||
}
|
||||
} while (true);
|
||||
}
|
||||
|
||||
module.exports.sleep = sleep;
|
||||
module.exports.md5 = md5;
|
||||
module.exports.crc32 = crc32;
|
||||
module.exports.crc16 = crc16;
|
||||
module.exports.lockKeysFromRequest = lockKeysFromRequest;
|
||||
module.exports.getLargestNumber = getLargestNumber;
|
||||
module.exports.stringify = stringify;
|
||||
module.exports.stripWindowsDriveLetter = stripWindowsDriveLetter;
|
||||
module.exports.hasWindowsDriveLetter = hasWindowsDriveLetter;
|
||||
module.exports.axios_request = axios_request;
|
||||
module.exports.default_supported_block_filesystems =
|
||||
default_supported_block_filesystems;
|
||||
module.exports.default_supported_file_filesystems =
|
||||
default_supported_file_filesystems;
|
||||
module.exports.retry = retry;
|
||||
|
|
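retry(retries, retriesDelay, code, options) defined above re-runs an async function, optionally growing the delay (exponential), capping it (maxwait), padding very fast attempts (minExecutionTime), logging caught errors (logerrors), and consulting retryCondition before trying again. A compact usage sketch with example values:

const GeneralUtils = require("./src/utils/general");

async function main() {
  let calls = 0;

  const value = await GeneralUtils.retry(
    5, // give up after 5 attempts
    100, // base delay in ms between attempts
    async () => {
      calls++;
      if (calls < 3) {
        throw new Error("transient failure");
      }
      return "ok";
    },
    {
      exponential: true, // delay grows as retriesDelay * attempt number
      maxwait: 1000, // but never sleep longer than this between attempts
      minExecutionTime: 50, // pad attempts that finish faster than 50 ms
      logerrors: true, // log each caught error
      retryCondition: (err) => err.message.includes("transient"),
    }
  );

  console.log(value, "after", calls, "attempts"); // ok after 3 attempts
}

main();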
|
|||
|
|
@ -298,7 +298,7 @@ class Mount {
|
|||
return false;
|
||||
}
|
||||
const mount_info = await mount.getMountDetails(path);
|
||||
const is_block = filesystem.isBlockDevice(path);
|
||||
const is_block = await filesystem.isBlockDevice(path);
|
||||
if (mount_info.fstype == "devtmpfs" && is_block) {
|
||||
return true;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,5 @@
|
|||
if (typeof String.prototype.replaceAll == "undefined") {
  String.prototype.replaceAll = function (match, replace) {
    return this.replace(new RegExp(match, "g"), () => replace);
  };
}
|
||||
|
|
@ -0,0 +1,85 @@
|
|||
const cp = require("child_process");
|
||||
|
||||
class Powershell {
|
||||
async exec(command, options = {}) {
|
||||
if (!options.hasOwnProperty("timeout")) {
|
||||
// TODO: cannot use this as fsck etc are too risky to kill
|
||||
//options.timeout = DEFAULT_TIMEOUT;
|
||||
}
|
||||
|
||||
//cmd := exec.Command("powershell", "-Mta", "-NoProfile", "-Command", command)
|
||||
|
||||
let stdin;
|
||||
if (options.stdin) {
|
||||
stdin = options.stdin;
|
||||
delete options.stdin;
|
||||
}
|
||||
|
||||
// https://github.com/kubernetes-csi/csi-proxy/blob/master/pkg/utils/utils.go
|
||||
const _command = "powershell";
|
||||
const args = [
|
||||
"-Mta",
|
||||
"-NoProfile",
|
||||
"-Command",
|
||||
command
|
||||
];
|
||||
|
||||
let command_log = `${_command} ${args.join(" ")}`.trim();
|
||||
if (stdin) {
|
||||
command_log = `echo '${stdin}' | ${command_log}`
|
||||
.trim()
|
||||
.replace(/\n/, "\\n");
|
||||
}
|
||||
console.log("executing powershell command: %s", command_log);
|
||||
|
||||
return new Promise((resolve, reject) => {
|
||||
const child = cp.spawn(_command, args, options);
|
||||
let stdout = "";
|
||||
let stderr = "";
|
||||
|
||||
child.on("spawn", function () {
|
||||
if (stdin) {
|
||||
child.stdin.setEncoding("utf-8");
|
||||
child.stdin.write(stdin);
|
||||
child.stdin.end();
|
||||
}
|
||||
});
|
||||
|
||||
child.stdout.on("data", function (data) {
|
||||
stdout = stdout + data;
|
||||
});
|
||||
|
||||
child.stderr.on("data", function (data) {
|
||||
stderr = stderr + data;
|
||||
});
|
||||
|
||||
child.on("close", function (code) {
|
||||
const result = { code, stdout, stderr, timeout: false };
|
||||
|
||||
// timeout scenario
|
||||
if (code === null) {
|
||||
result.timeout = true;
|
||||
reject(result);
|
||||
}
|
||||
|
||||
if (code) {
|
||||
console.log(
|
||||
"failed to execute powershell command: %s, response: %j",
|
||||
command_log,
|
||||
result
|
||||
);
|
||||
reject(result);
|
||||
} else {
|
||||
try {
|
||||
result.parsed = JSON.parse(result.stdout);
|
||||
} catch (err) { };
|
||||
resolve(result);
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
module.exports.Powershell = Powershell;
|
||||
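The Powershell helper shells out to powershell -Mta -NoProfile -Command and tries to JSON-parse stdout into result.parsed. A usage sketch; the require path is assumed, and the cmdlets are ordinary PowerShell (Get-Disk, ConvertTo-Json):

const { Powershell } = require("./src/utils/powershell"); // path assumed

async function main() {
  const ps = new Powershell();

  // piping through ConvertTo-Json lets exec() populate result.parsed
  const result = await ps.exec(
    "Get-Disk | Select-Object Number,FriendlyName | ConvertTo-Json"
  );

  console.log(result.code); // 0 on success
  console.log(result.parsed); // parsed JSON when stdout was valid JSON
}

main().catch((err) => {
  // exec() rejects with { code, stdout, stderr, timeout } on failure
  console.error(err);
});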
src/utils/ssh.js: 163 lines changed
|
|
@ -1,4 +1,6 @@
|
|||
var Client = require("ssh2").Client;
|
||||
const Client = require("ssh2").Client;
|
||||
const { E_CANCELED, Mutex } = require("async-mutex");
|
||||
const GeneralUtils = require("./general");
|
||||
|
||||
class SshClient {
|
||||
constructor(options = {}) {
|
||||
|
|
@ -8,7 +10,47 @@ class SshClient {
|
|||
this.logger = this.options.logger;
|
||||
} else {
|
||||
this.logger = console;
|
||||
console.silly = console.debug;
|
||||
}
|
||||
|
||||
if (!this.options.connection.hasOwnProperty("keepaliveInterval")) {
|
||||
this.options.connection.keepaliveInterval = 10000;
|
||||
}
|
||||
|
||||
if (this.options.connection.debug == true) {
|
||||
this.options.connection.debug = function (msg) {
|
||||
this.debug(msg);
|
||||
};
|
||||
}
|
||||
|
||||
this.conn_mutex = new Mutex();
|
||||
this.conn_state;
|
||||
this.conn_err;
|
||||
this.ready_event_count = 0;
|
||||
this.error_event_count = 0;
|
||||
|
||||
this.conn = new Client();
|
||||
// invoked before close
|
||||
this.conn.on("end", () => {
|
||||
this.conn_state = "ended";
|
||||
this.debug("Client :: end");
|
||||
});
|
||||
// invoked after end
|
||||
this.conn.on("close", () => {
|
||||
this.conn_state = "closed";
|
||||
this.debug("Client :: close");
|
||||
});
|
||||
this.conn.on("error", (err) => {
|
||||
this.conn_state = "error";
|
||||
this.conn_err = err;
|
||||
this.error_event_count++;
|
||||
this.debug("Client :: error");
|
||||
});
|
||||
this.conn.on("ready", () => {
|
||||
this.conn_state = "ready";
|
||||
this.ready_event_count++;
|
||||
this.debug("Client :: ready");
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -27,17 +69,119 @@ class SshClient {
    this.logger.silly(...arguments);
  }

  async _connect() {
    const start_ready_event_count = this.ready_event_count;
    const start_error_event_count = this.error_event_count;
    try {
      await this.conn_mutex.runExclusive(async () => {
        this.conn.connect(this.options.connection);
        do {
          if (start_error_event_count != this.error_event_count) {
            throw this.conn_err;
          }

          if (start_ready_event_count != this.ready_event_count) {
            break;
          }

          await GeneralUtils.sleep(100);
        } while (true);
      });
    } catch (err) {
      if (err === E_CANCELED) {
        return;
      }
      throw err;
    }
  }

  async connect() {
    if (this.conn_state == "ready") {
      return;
    }

    return this._connect();
  }

  async exec(command, options = {}, stream_proxy = null) {
    // default is to reuse
    if (process.env.SSH_REUSE_CONNECTION == "0") {
      return this._nexec(...arguments);
    } else {
      return this._rexec(...arguments);
    }
  }

  async _rexec(command, options = {}, stream_proxy = null) {
    const client = this;
    const conn = this.conn;

    return new Promise(async (resolve, reject) => {
      do {
        try {
          await this.connect();
          conn.exec(command, options, function (err, stream) {
            if (err) {
              reject(err);
              return;
            }
            let stderr;
            let stdout;

            if (stream_proxy) {
              stream_proxy.on("kill", (signal) => {
                stream.destroy();
              });
            }

            stream
              .on("close", function (code, signal) {
                client.debug(
                  "Stream :: close :: code: " + code + ", signal: " + signal
                );
                if (stream_proxy) {
                  stream_proxy.emit("close", ...arguments);
                }
                resolve({ stderr, stdout, code, signal });
                //conn.end();
              })
              .on("data", function (data) {
                client.debug("STDOUT: " + data);
                if (stream_proxy) {
                  stream_proxy.stdout.emit("data", ...arguments);
                }
                if (stdout == undefined) {
                  stdout = "";
                }
                stdout = stdout.concat(data);
              })
              .stderr.on("data", function (data) {
                client.debug("STDERR: " + data);
                if (stream_proxy) {
                  stream_proxy.stderr.emit("data", ...arguments);
                }
                if (stderr == undefined) {
                  stderr = "";
                }
                stderr = stderr.concat(data);
              });
          });
          break;
        } catch (err) {
          if (err.message && !err.message.includes("Not connected")) {
            throw err;
          }
        }
        await GeneralUtils.sleep(1000);
      } while (true);
    });
  }

  async _nexec(command, options = {}, stream_proxy = null) {
    const client = this;
    return new Promise((resolve, reject) => {
      var conn = new Client();

      if (client.options.connection.debug == true) {
        client.options.connection.debug = function (msg) {
          client.debug(msg);
        };
      }

      conn
        .on("error", function (err) {
          client.debug("Client :: error");

@ -50,7 +194,10 @@ class SshClient {
          // TERM: "",
          //};
          conn.exec(command, options, function (err, stream) {
            if (err) reject(err);
            if (err) {
              reject(err);
              return;
            }
            let stderr;
            let stdout;
            stream

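To make the connection-reuse behaviour above concrete, here is a small usage sketch of SshClient; the export name and the ssh2-style connection fields (host, port, username, privateKey) are assumptions based on the constructor and exec() shown in this hunk, and the host and key path are placeholders, not values from this repository.

// usage sketch (assumptions: ssh.js exports SshClient the same way the other
// utils export their classes; connection follows the ssh2 client config)
const fs = require("fs");
const { SshClient } = require("./ssh");

async function main() {
  const client = new SshClient({
    logger: console,
    connection: {
      host: "192.0.2.10",
      port: 22,
      username: "root",
      privateKey: fs.readFileSync("/root/.ssh/id_rsa"),
    },
  });

  // default path: _rexec() reuses the single persistent connection guarded by
  // the mutex; set SSH_REUSE_CONNECTION=0 to force a new connection per call
  const result = await client.exec("uname -a");
  console.log(result.code, result.stdout, result.stderr);
}

main().catch(console.error);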
@ -0,0 +1,783 @@
const _ = require("lodash");
const GeneralUtils = require("./general");
const Powershell = require("./powershell").Powershell;

/**
 * https://kubernetes.io/blog/2021/08/16/windows-hostprocess-containers/
 * https://github.com/kubernetes-csi/csi-proxy/tree/master/pkg/os
 *
 * multipath notes:
 * - http://scst.sourceforge.net/mc_s.html
 * - https://github.com/kubernetes-csi/csi-proxy/pull/99
 * - https://docs.microsoft.com/en-us/azure/storsimple/storsimple-8000-configure-mpio-windows-server
 * - https://support.purestorage.com/Legacy_Documentation/Setting_the_MPIO_Policy
 * - https://docs.microsoft.com/en-us/powershell/module/mpio/?view=windowsserver2022-ps
 *
 * Get-WindowsFeature -Name 'Multipath-IO'
 * Add-WindowsFeature -Name 'Multipath-IO'
 *
 * Enable-MSDSMAutomaticClaim -BusType "iSCSI"
 * Disable-MSDSMAutomaticClaim -BusType "iSCSI"
 *
 * Get-MSDSMGlobalDefaultLoadBalancePolicy
 * Set-MSDSMGlobalLoadBalancePolicy -Policy RR
 *
 * synology woes:
 * - https://community.spiceworks.com/topic/2279882-synology-iscsi-will-not-disconnect-using-powershell-commands
 * - https://support.hpe.com/hpesc/public/docDisplay?docId=c01880810&docLocale=en_US
 * - https://askubuntu.com/questions/1159103/why-is-iscsi-trying-to-connect-on-ipv6-at-boot
 */
class Windows {
  constructor() {
    this.ps = new Powershell();
  }

  resultToArray(result) {
    if (!result.parsed) {
      result.parsed = [];
    }
    if (!Array.isArray(result.parsed)) {
      result.parsed = [result.parsed];
    }
  }

  uncPathToShare(path) {
    // UNC\<server>\<share>[\<path>\]
    if (path.startsWith("UNC")) {
      path = path.replace("UNC", "\\");
    }

    if (!path.startsWith("\\\\")) {
      path = `\\\\${path}`;
    }

    let parts = path.split("\\");
    return `\\\\${parts[2]}\\${parts[3]}`;
  }

  async GetRealTarget(path) {
    let item;
    let target;

    do {
      item = await this.GetItem(path);
      path = null;

      target = _.get(item, "Target.[0]", "");
      if (target.startsWith("UNC")) {
        let parts = target.split("\\", 3);
        return `\\\\${parts[1]}\\${parts[2]}`;
      } else if (target.startsWith("Volume")) {
        return `\\\\?\\${target}`;
      } else {
        path = target;
      }
    } while (path);
  }

  async GetItem(localPath) {
    let command;
    let result;
    command = 'Get-Item "$Env:localpath" | ConvertTo-Json';
    try {
      result = await this.ps.exec(command, {
        env: {
          localpath: localPath,
        },
      });
      return result.parsed;
    } catch (err) {}
  }

  async GetSmbGlobalMapping(remotePath) {
    let command;
    // cannot have trailing slash nor a path
    // must be \\<server>\<share>
    remotePath = this.uncPathToShare(remotePath);
    command =
      "Get-SmbGlobalMapping -RemotePath $Env:smbremotepath | ConvertTo-Json";
    try {
      return await this.ps.exec(command, {
        env: {
          smbremotepath: remotePath,
        },
      });
    } catch (err) {}
  }

  /**
   * Global in this context is allowed access by all users
   *
   * @param {*} remotePath
   * @param {*} username
   * @param {*} password
   */
  async NewSmbGlobalMapping(remotePath, username, password) {
    let result;
    let command;
    // -UseWriteThrough $true
    // cannot have trailing slash nor a path
    // must be \\<server>\<share>
    remotePath = this.uncPathToShare(remotePath);
    command =
      "$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential -RequirePrivacy $true";

    result = await this.GetSmbGlobalMapping(remotePath);
    if (!result) {
      await this.ps.exec(command, {
        env: {
          smbuser: username,
          smbpassword: password,
          smbremotepath: remotePath,
        },
      });
    }
  }

  async RemoveSmbGlobalMapping(remotePath) {
    let result;
    let command;
    // cannot have trailing slash nor a path
    // must be \\<server>\<share>
    remotePath = this.uncPathToShare(remotePath);
    command = "Remove-SmbGlobalMapping -RemotePath $Env:smbremotepath -Force";

    do {
      result = await this.GetSmbGlobalMapping(remotePath);
      if (result) {
        await this.ps.exec(command, {
          env: {
            smbremotepath: remotePath,
          },
        });
      }
    } while (result);
  }

  async NewSmbLink(remotePath, localPath) {
    let command;
    // trailing slash required
    // may include subdirectories on the share if desired
    if (!remotePath.endsWith("\\")) {
      remotePath = `${remotePath}\\`;
    }

    command =
      "New-Item -ItemType SymbolicLink $Env:smblocalPath -Target $Env:smbremotepath";
    await this.ps.exec(command, {
      env: {
        smblocalpath: localPath,
        smbremotepath: remotePath,
      },
    });
  }

  async NewIscsiTargetPortal(address, port) {
    let command;
    command =
      "New-IscsiTargetPortal -TargetPortalAddress ${Env:iscsi_tp_address} -TargetPortalPortNumber ${Env:iscsi_tp_port}";
    await this.ps.exec(command, {
      env: {
        iscsi_tp_address: address,
        iscsi_tp_port: port,
      },
    });
  }

  async RemoveIscsiTargetPortalByTargetPortalAddress(targetPortalAddress) {
    let command;
    command = `Remove-IscsiTargetPortal -TargetPortalAddress ${targetPortalAddress} -Confirm:$false`;
    await this.ps.exec(command);
  }

  async RemoveIscsiTargetPortalByTargetPortalAddressTargetPortalPort(
    targetPortalAddress,
    targetPortalPort
  ) {
    let command;
    command = `Get-IscsiTargetPortal -TargetPortalAddress ${targetPortalAddress} -TargetPortalPortNumber ${targetPortalPort} | Remove-IscsiTargetPortal -Confirm:$false`;
    await this.ps.exec(command);
  }

  async IscsiTargetIsConnectedByPortalAddressPortalPort(address, port, iqn) {
    let sessions = await this.GetIscsiSessionsByTargetNodeAddress(iqn);
    for (let session of sessions) {
      let connections = await this.GetIscsiConnectionsByIscsiSessionIdentifier(
        session.SessionIdentifier
      );
      for (let connection of connections) {
        if (
          connection.TargetAddress == address &&
          connection.TargetPortNumber == port
        ) {
          return true;
        }
      }
    }

    //process.exit(1);

    return false;
  }

  /**
   * -IsMultipathEnabled
   *
   * @param {*} address
   * @param {*} port
   * @param {*} iqn
   * @param {*} authType
   * @param {*} chapUser
   * @param {*} chapSecret
   */
  async ConnectIscsiTarget(
    address,
    port,
    iqn,
    authType,
    chapUser,
    chapSecret,
    multipath = false
  ) {
    let is_connected =
      await this.IscsiTargetIsConnectedByPortalAddressPortalPort(
        address,
        port,
        iqn
      );
    if (is_connected) {
      return;
    }

    let command;
    // -IsMultipathEnabled $([System.Convert]::ToBoolean(${Env:iscsi_is_multipath}))
    // -InitiatorPortalAddress
    command =
      "Connect-IscsiTarget -TargetPortalAddress ${Env:iscsi_tp_address} -TargetPortalPortNumber ${Env:iscsi_tp_port} -NodeAddress ${Env:iscsi_target_iqn} -AuthenticationType ${Env:iscsi_auth_type}";

    if (chapUser) {
      command += " -ChapUsername ${Env:iscsi_chap_user}";
    }

    if (chapSecret) {
      command += " -ChapSecret ${Env:iscsi_chap_secret}";
    }

    if (multipath) {
      command +=
        " -IsMultipathEnabled $([System.Convert]::ToBoolean(${Env:iscsi_is_multipath}))";
    }

    try {
      await this.ps.exec(command, {
        env: {
          iscsi_tp_address: address,
          iscsi_tp_port: port,
          iscsi_target_iqn: iqn,
          iscsi_auth_type: authType,
          iscsi_chap_user: chapUser,
          iscsi_chap_secret: chapSecret,
          iscsi_is_multipath: String(multipath),
        },
      });
    } catch (err) {
      let details = _.get(err, "stderr", "");
      if (
        !details.includes(
          "The target has already been logged in via an iSCSI session"
        )
      ) {
        throw err;
      }
    }
  }

  async GetIscsiTargetsByTargetPortalAddressTargetPortalPort(address, port) {
    let command;
    let result;

    command =
      "Get-IscsiTargetPortal -TargetPortalAddress ${Env:iscsi_tp_address} -TargetPortalPortNumber ${Env:iscsi_tp_port} | Get-IscsiTarget | ConvertTo-Json";
    result = await this.ps.exec(command, {
      env: {
        iscsi_tp_address: address,
        iscsi_tp_port: port,
      },
    });
    this.resultToArray(result);

    return result.parsed;
  }

  /**
   * This disconnects *all* sessions from the target
   *
   * @param {*} nodeAddress
   */
  async DisconnectIscsiTargetByNodeAddress(nodeAddress) {
    let command;

    // https://github.com/PowerShell/PowerShell/issues/17306
    command = `Disconnect-IscsiTarget -NodeAddress ${nodeAddress.toLowerCase()} -Confirm:$false`;
    await this.ps.exec(command);
  }

  async GetIscsiConnectionsByIscsiSessionIdentifier(iscsiSessionIdentifier) {
    let command;
    let result;

    command = `Get-IscsiSession -SessionIdentifier ${iscsiSessionIdentifier} | Get-IscsiConnection | ConvertTo-Json`;
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetIscsiSessions() {
    let command;
    let result;

    command = `Get-IscsiSession | ConvertTo-Json`;
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetIscsiSessionsByDiskNumber(diskNumber) {
    let command;
    let result;

    command = `Get-Disk -Number ${diskNumber} | Get-IscsiSession | ConvertTo-Json`;
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetIscsiSessionsByVolumeId(volumeId) {
    let sessions = [];
    let disks = await this.GetDisksByVolumeId(volumeId);
    for (let disk of disks) {
      let i_sessions = await this.GetIscsiSessionsByDiskNumber(disk.DiskNumber);
      sessions.push(...i_sessions);
    }

    return sessions;
  }

  async GetIscsiSessionsByTargetNodeAddress(targetNodeAddress) {
    let sessions = await this.GetIscsiSessions();
    let r_sessions = [];
    // Where-Object { $_.TargetNodeAddress -eq ${targetNodeAddress} }
    for (let session of sessions) {
      if (session.TargetNodeAddress == targetNodeAddress) {
        r_sessions.push(session);
      }
    }

    return r_sessions;
  }

  async GetIscsiSessionByIscsiConnectionIdentifier(iscsiConnectionIdentifier) {
    let command;
    let result;

    command = `Get-IscsiConnection -ConnectionIdentifier ${iscsiConnectionIdentifier} | Get-IscsiSession | ConvertTo-Json`;
    result = await this.ps.exec(command);

    return result.parsed;
  }

  async GetIscsiTargetPortalBySessionId(sessionId) {
    let command;
    let result;

    command = `Get-IscsiSession -SessionIdentifier ${sessionId} | Get-IscsiTargetPortal | ConvertTo-Json`;
    result = await this.ps.exec(command);

    return result.parsed;
  }

  async UpdateHostStorageCache() {
    let command;
    command = "Update-HostStorageCache";
    await this.ps.exec(command);
  }

  async GetIscsiDisks() {
    let command;
    let result;

    command = "Get-iSCSISession | Get-Disk | ConvertTo-Json";
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetWin32DiskDrives() {
    let command;
    let result;

    command = "Get-WmiObject Win32_DiskDrive | ConvertTo-Json";
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetDiskLunByDiskNumber(diskNumber) {
    let result;
    result = await this.GetWin32DiskDrives();
    for (let drive of result) {
      if (drive.Index == diskNumber) {
        return drive.SCSILogicalUnit;
      }
    }
  }

  async GetTargetDisks(address, port, iqn) {
    let command;
    let result;

    // this fails for synology for some reason
    //command =
    // '$ErrorActionPreference = "Stop"; $tp = Get-IscsiTargetPortal -TargetPortalAddress ${Env:iscsi_tp_address} -TargetPortalPortNumber ${Env:iscsi_tp_port}; $t = $tp | Get-IscsiTarget | Where-Object { $_.NodeAddress -eq ${Env:iscsi_target_iqn} }; $s = Get-iSCSISession -IscsiTarget $t; $s | Get-Disk | ConvertTo-Json';

    command =
      '$ErrorActionPreference = "Stop"; $s = Get-iSCSISession | Where-Object { $_.TargetNodeAddress -eq ${Env:iscsi_target_iqn} }; $s | Get-Disk | ConvertTo-Json';

    result = await this.ps.exec(command, {
      env: {
        iscsi_tp_address: address,
        iscsi_tp_port: port,
        iscsi_target_iqn: iqn,
      },
    });
    this.resultToArray(result);

    return result.parsed;
  }

  async GetTargetDisksByIqn(iqn) {
    let command;
    let result;

    command =
      '$ErrorActionPreference = "Stop"; $s = Get-iSCSISession | Where-Object { $_.TargetNodeAddress -eq ${Env:iscsi_target_iqn} }; $s | Get-Disk | ConvertTo-Json';

    result = await this.ps.exec(command, {
      env: {
        iscsi_target_iqn: iqn,
      },
    });
    this.resultToArray(result);

    return result.parsed;
  }

  /**
   * This can be multiple when mpio is not configured properly and each
   * session creates a new disk
   *
   * @param {*} iqn
   * @param {*} lun
   * @returns
   */
  async GetTargetDisksByIqnLun(iqn, lun) {
    let result;
    let dlun;
    let disks = [];

    result = await this.GetTargetDisksByIqn(iqn);
    for (let disk of result) {
      dlun = await this.GetDiskLunByDiskNumber(disk.DiskNumber);
      if (dlun == lun) {
        disks.push(disk);
      }
    }

    return disks;
  }

  async GetDiskByDiskNumber(diskNumber) {
    let command;
    let result;

    command = `Get-Disk -Number ${diskNumber} | ConvertTo-Json`;
    result = await this.ps.exec(command);

    return result.parsed;
  }

  async GetDisks() {
    let command;
    let result;

    command = "Get-Disk | ConvertTo-Json";
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetPartitions() {
    let command;
    let result;

    command = "Get-Partition | ConvertTo-Json";
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetPartitionsByDiskNumber(diskNumber) {
    let command;
    let result;

    command = `Get-Disk -Number ${diskNumber} | Get-Partition | ConvertTo-Json`;
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async DiskIsInitialized(diskNumber) {
    let disk = await this.GetDiskByDiskNumber(diskNumber);

    return disk.PartitionStyle != "RAW";
  }

  async InitializeDisk(diskNumber) {
    let command;

    command = `Initialize-Disk -Number ${diskNumber} -PartitionStyle GPT`;
    await this.ps.exec(command);
  }

  async DiskHasBasicPartition(diskNumber) {
    let command;
    let result;

    command = `Get-Partition | Where DiskNumber -eq ${diskNumber} | Where Type -ne Reserved | ConvertTo-Json`;
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed.length > 0;
  }

  async NewPartition(diskNumber) {
    let command;

    command = `New-Partition -DiskNumber ${diskNumber} -UseMaximumSize`;
    await this.ps.exec(command);
  }

  async PartitionDisk(diskNumber) {
    let is_initialized;
    let has_basic_partition;

    is_initialized = await this.DiskIsInitialized(diskNumber);
    if (!is_initialized) {
      await this.InitializeDisk(diskNumber);
    }

    has_basic_partition = await this.DiskHasBasicPartition(diskNumber);
    if (!has_basic_partition) {
      await this.NewPartition(diskNumber);
    }
  }

  async GetLastPartitionByDiskNumber(diskNumber) {
    let partitions = await this.GetPartitionsByDiskNumber(diskNumber);
    let p;
    for (let partition of partitions) {
      if (!p) {
        p = partition;
      }

      if (partition.PartitionNumber > p.PartitionNumber) {
        p = partition;
      }
    }

    return p;
  }

  async GetVolumesByDiskNumber(diskNumber) {
    let command;
    let result;
    command = `Get-Disk -Number ${diskNumber} | Get-Partition | Get-Volume | ConvertTo-Json`;
    result = await this.ps.exec(command);
    this.resultToArray(result);

    return result.parsed;
  }

  async GetVolumeByDiskNumberPartitionNumber(diskNumber, partitionNumber) {
    let command;
    let result;

    command = `Get-Disk -Number ${diskNumber} | Get-Partition -PartitionNumber ${partitionNumber} | Get-Volume | ConvertTo-Json`;
    result = await this.ps.exec(command);

    return result.parsed;
  }

  async GetVolumeByVolumeId(volumeId) {
    let command;
    let result;

    command = `Get-Volume -UniqueId \"${volumeId}\" -ErrorAction Stop | ConvertTo-Json`;
    result = await this.ps.exec(command);

    return result.parsed;
  }

  async GetPartitionsByVolumeId(volumeId) {
    let partitions = await this.GetPartitions();
    let p = [];
    for (let partition of partitions) {
      let paths = _.get(partition, "AccessPaths", []);
      if (paths === null) {
        paths = [];
      }
      if (!Array.isArray(paths)) {
        paths = [];
      }
      if (paths.includes(volumeId)) {
        p.push(partition);
      }
    }
    return p;
  }

  async GetDisksByVolumeId(volumeId) {
    let partitions = await this.GetPartitionsByVolumeId(volumeId);
    let diskNumbers = new Set();
    for (let partition of partitions) {
      diskNumbers.add(partition.DiskNumber);
    }

    let disks = [];
    let disk;
    for (let diskNumber of diskNumbers) {
      disk = await this.GetDiskByDiskNumber(diskNumber);
      if (disk) {
        disks.push(disk);
      }
    }

    return disks;
  }

  async VolumeIsFormatted(volumeId) {
    let volume = await this.GetVolumeByVolumeId(volumeId);
    let type = volume.FileSystemType || "";
    type = type.toLowerCase().trim();
    if (!type || type == "unknown") {
      return false;
    }

    return true;
  }

  async VolumeIsIscsi(volumeId) {
    let disks = await this.GetDisksByVolumeId(volumeId);
    for (let disk of disks) {
      if (_.get(disk, "BusType", "").toLowerCase() == "iscsi") {
        return true;
      }
    }

    return false;
  }

  async FormatVolume(volumeId) {
    let command;
    command = `Get-Volume -UniqueId \"${volumeId}\" | Format-Volume -FileSystem ntfs -Confirm:$false`;
    await this.ps.exec(command);
  }

  async ResizeVolume(volumeId, size = 0) {
    let command;
    let final_size;

    if (!size) {
      final_size = await this.GetVolumeMaxSize(volumeId);
    } else {
      final_size = size;
    }

    let current_size = await this.GetVolumeSize(volumeId);
    if (current_size >= final_size) {
      return;
    }

    command = `Get-Volume -UniqueId \"${volumeId}\" | Get-Partition | Resize-Partition -Size ${final_size}`;
    try {
      await this.ps.exec(command);
    } catch (err) {
      let details = _.get(err, "stderr", "");
      if (
        !details.includes(
          "The size of the extent is less than the minimum of 1MB"
        )
      ) {
        throw err;
      }
    }
  }

  async GetVolumeMaxSize(volumeId) {
    let command;
    let result;

    command = `Get-Volume -UniqueId \"${volumeId}\" | Get-partition | Get-PartitionSupportedSize | Select SizeMax | ConvertTo-Json`;
    result = await this.ps.exec(command);
    return result.parsed.SizeMax;
  }

  async GetVolumeSize(volumeId) {
    let command;
    let result;

    command = `Get-Volume -UniqueId \"${volumeId}\" | Get-partition | ConvertTo-Json`;
    result = await this.ps.exec(command);

    return result.parsed.Size;
  }

  async MountVolume(volumeId, path) {
    let command;
    command = `Get-Volume -UniqueId \"${volumeId}\" | Get-Partition | Add-PartitionAccessPath -AccessPath ${path}`;

    await this.ps.exec(command);
  }

  async UnmountVolume(volumeId, path) {
    let command;

    // this errors if it does not have a drive letter
    if (!GeneralUtils.hasWindowsDriveLetter(path)) {
      let item = await this.GetItem(path);
      if (!item) {
        return;
      }
      path = item.FullName;
    }

    command = `Get-Volume -UniqueId \"${volumeId}\" | Get-Partition | Remove-PartitionAccessPath -AccessPath ${path}`;

    await this.ps.exec(command);
  }

  async WriteVolumeCache(volumeId) {
    let command;
    command = `Get-Volume -UniqueId \"${volumeId}\" | Write-Volumecache`;

    await this.ps.exec(command);
  }
}

module.exports.Windows = Windows;
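To show how the methods above compose, here is a brief, hedged sketch of a simple iSCSI attach-and-mount flow using only methods defined in this class; the require path, the portal address, IQN, LUN, mount path, and the UniqueId field on the parsed volume object are assumptions or placeholders, not values taken from this repository.

// usage sketch of the Windows helper above (all concrete values are placeholders)
const { Windows } = require("./windows");

async function attachAndMount() {
  const wu = new Windows();
  const address = "192.0.2.20";
  const port = 3260;
  const iqn = "iqn.2000-01.com.example:target0";
  const lun = 0;

  await wu.NewIscsiTargetPortal(address, port);
  await wu.ConnectIscsiTarget(address, port, iqn, "NONE", null, null, false);
  await wu.UpdateHostStorageCache();

  // locate the disk backing this target/lun, partition it if needed
  const disks = await wu.GetTargetDisksByIqnLun(iqn, lun);
  const disk = disks[0];
  await wu.PartitionDisk(disk.DiskNumber);

  const partition = await wu.GetLastPartitionByDiskNumber(disk.DiskNumber);
  const volume = await wu.GetVolumeByDiskNumberPartitionNumber(
    disk.DiskNumber,
    partition.PartitionNumber
  );

  // format on first use, then expose the volume at a directory mount point
  if (!(await wu.VolumeIsFormatted(volume.UniqueId))) {
    await wu.FormatVolume(volume.UniqueId);
  }
  await wu.MountVolume(volume.UniqueId, "C:\\var\\lib\\kubelet\\some-mount");
}

attachAndMount().catch(console.error);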
Some files were not shown because too many files have changed in this diff.