support new containerd-oci-ephemeral-inline driver
Signed-off-by: Travis Glenn Hansen <travisghansen@yahoo.com>
parent 55c36d62ff
commit fc7ec358ab
@@ -487,16 +487,16 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [windows-2019, windows-2022]
+        os: [windows-2022, windows-2025]
         include:
-          - os: windows-2019
-            core_base_tag: ltsc2019
-            nano_base_tag: "1809"
-            file: Dockerfile.Windows
           - os: windows-2022
             core_base_tag: ltsc2022
             nano_base_tag: ltsc2022
             file: Dockerfile.Windows
+          - os: windows-2025
+            core_base_tag: ltsc2025
+            nano_base_tag: ltsc2025
+            file: Dockerfile.Windows
     steps:
       - uses: actions/checkout@v4
       - name: docker build
@@ -528,10 +528,10 @@ jobs:
       - uses: actions/checkout@v4
       - uses: actions/download-artifact@v4
         with:
-          name: democratic-csi-windows-ltsc2019.tar
+          name: democratic-csi-windows-ltsc2022.tar
       - uses: actions/download-artifact@v4
         with:
-          name: democratic-csi-windows-ltsc2022.tar
+          name: democratic-csi-windows-ltsc2025.tar
       - name: push windows images with buildah
         run: |
           #.github/bin/install_latest_buildah.sh
							
								
								
									
Dockerfile (34 changed lines)
@@ -1,6 +1,25 @@
 # docker build --pull -t foobar .
 # docker buildx build --pull -t foobar --platform linux/amd64,linux/arm64,linux/arm/v7,linux/s390x,linux/ppc64le .
 # docker run --rm -ti --user root --entrypoint /bin/bash foobar
 
+######################
+# golang builder
+######################
+FROM golang:1.25.3-bookworm as ctrbuilder
+
+# /go/containerd/ctr
+ADD docker/ctr-mount-labels.diff /tmp
+RUN \
+  git clone https://github.com/containerd/containerd.git; \
+  cd containerd && \
+  git checkout v2.0.4 && \
+  git apply /tmp/ctr-mount-labels.diff && \
+  CGO_ENABLED=0 go build ./cmd/ctr/;
+
+
+######################
+# nodejs builder
+######################
 FROM debian:12-slim AS build
 #FROM --platform=$BUILDPLATFORM debian:10-slim AS build
 
@@ -78,6 +97,9 @@ RUN test $(uname -m) != armv7l || ( \
   && rm -rf /var/lib/apt/lists/* \
   )
 
+# install ctr
+COPY --from=ctrbuilder /go/containerd/ctr /usr/local/bin/ctr
+
 # install node
 #ENV PATH=/usr/local/lib/nodejs/bin:$PATH
 #COPY --from=build /usr/local/lib/nodejs /usr/local/lib/nodejs
@@ -116,31 +138,27 @@ RUN chmod +x /usr/local/sbin/yq-installer.sh && yq-installer.sh
 #        rm -rf /var/lib/apt/lists/*
 
 # install objectivefs
-ARG OBJECTIVEFS_VERSION=7.2
+ARG OBJECTIVEFS_VERSION=7.3
 ADD docker/objectivefs-installer.sh /usr/local/sbin
 RUN chmod +x /usr/local/sbin/objectivefs-installer.sh && objectivefs-installer.sh
 
 # install wrappers
 ADD docker/iscsiadm /usr/local/sbin
-RUN chmod +x /usr/local/sbin/iscsiadm
 
 ADD docker/multipath /usr/local/sbin
-RUN chmod +x /usr/local/sbin/multipath
 
 ## USE_HOST_MOUNT_TOOLS=1
 ADD docker/mount /usr/local/bin/mount
-RUN chmod +x /usr/local/bin/mount
 
 ## USE_HOST_MOUNT_TOOLS=1
 ADD docker/umount /usr/local/bin/umount
-RUN chmod +x /usr/local/bin/umount
 
 ADD docker/zfs /usr/local/bin/zfs
-RUN chmod +x /usr/local/bin/zfs
 ADD docker/zpool /usr/local/bin/zpool
-RUN chmod +x /usr/local/bin/zpool
 ADD docker/oneclient /usr/local/bin/oneclient
-RUN chmod +x /usr/local/bin/oneclient
 
 RUN chown -R root:root /usr/local/bin/*
 RUN chmod +x /usr/local/bin/*
 
 # Run as a non-root user
 RUN useradd --create-home csi \
@@ -99,6 +99,7 @@ COPY --from=build /PowerShell /PowerShell
 COPY --from=build /app /app
 WORKDIR /app
 
+ADD https://github.com/democratic-csi/democratic-csi/releases/download/v1.0.0/ctr.exe ./bin
 COPY --from=build /nodejs/node.exe ./bin
 COPY --from=build /usr/local/bin/ ./bin
 
@@ -463,6 +463,7 @@ passwd smbroot (optional)
 smbpasswd -L -a smbroot
 
 ####### nvmeof
+# apt-get install linux-modules-extra-$(uname -r)
 # ensure nvmeof target modules are loaded at startup
 cat <<EOF > /etc/modules-load.d/nvmet.conf
 nvmet
@@ -483,7 +484,8 @@ cd nvmetcli
 
 ## install globally
 python3 setup.py install --prefix=/usr
-pip install configshell_fb
+pip install configshell_fb # apt-get install -y pip python3-configshell-fb
+
 
 ## install to root home dir
 python3 setup.py install --user
@@ -0,0 +1,31 @@
diff --git a/cmd/ctr/commands/images/mount.go b/cmd/ctr/commands/images/mount.go
index c97954267..63c5a7746 100644
--- a/cmd/ctr/commands/images/mount.go
+++ b/cmd/ctr/commands/images/mount.go
@@ -25,6 +25,7 @@ import (
 	"github.com/containerd/containerd/v2/cmd/ctr/commands"
 	"github.com/containerd/containerd/v2/core/leases"
 	"github.com/containerd/containerd/v2/core/mount"
+	"github.com/containerd/containerd/v2/core/snapshots"
 	"github.com/containerd/containerd/v2/defaults"
 	"github.com/containerd/errdefs"
 	"github.com/containerd/platforms"
@@ -114,11 +115,16 @@ When you are done, use the unmount command.
 
 		s := client.SnapshotService(snapshotter)
 
+		labels := commands.LabelArgs(cliContext.StringSlice("label"))
+		opts := []snapshots.Opt{
+			snapshots.WithLabels(labels),
+		}
+
 		var mounts []mount.Mount
 		if cliContext.Bool("rw") {
-			mounts, err = s.Prepare(ctx, target, chainID)
+			mounts, err = s.Prepare(ctx, target, chainID, opts...)
 		} else {
-			mounts, err = s.View(ctx, target, chainID)
+			mounts, err = s.View(ctx, target, chainID, opts...)
 		}
 		if err != nil {
 			if errdefs.IsAlreadyExists(err) {
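The patch above adds a --label flag to `ctr images mount`, passing the values through to the snapshotter via snapshots.WithLabels. As a minimal sketch (not part of this commit), this is how the new driver later in this diff turns `snapshot.label.*` volume_context attributes into those flags; the sample image and label values are illustrative only.

// Mirror of the driver logic: collect snapshot.label.* attributes and emit --label args.
const volume_context = {
  "image.reference": "docker.io/library/busybox:1.36", // illustrative image
  "snapshot.label.foo": "bar", // illustrative label
};

const labels = {};
for (const key of Object.keys(volume_context)) {
  if (key.startsWith("snapshot.label.")) {
    labels[key.replace(/^snapshot\.label\./, "")] = volume_context[key];
  }
}

const ctr_mount_args = ["--rw"];
for (const label in labels) {
  ctr_mount_args.push("--label", `${label}=${labels[label]}`);
}
// => ctr images mount --rw --label foo=bar docker.io/library/busybox:1.36 <target>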
@@ -0,0 +1,6 @@
driver: containerd-oci-ephemeral-inline
containerd:
  #address: /run/containerd/containerd.sock
  #windowsAddress: \\\\.\\pipe\\containerd-containerd
  #namespace: default
  #creds encryption key
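For orientation, this is roughly the object the example YAML above parses into; a sketch only, with the address and namespace values taken from the commented-out defaults rather than being requirements.

// Sketch: parsed form of the example config above (values assumed from its comments).
const options = {
  driver: "containerd-oci-ephemeral-inline",
  containerd: {
    address: "/run/containerd/containerd.sock",
    namespace: "default",
  },
};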
										
											
(File diff suppressed because it is too large)
@@ -18,6 +18,7 @@
     "url": "https://github.com/democratic-csi/democratic-csi.git"
   },
   "dependencies": {
+    "@codefresh-io/docker-reference": "^0.0.11",
     "@grpc/grpc-js": "^1.8.4",
     "@grpc/proto-loader": "^0.7.0",
     "@kubernetes/client-node": "^0.18.0",
@@ -0,0 +1,493 @@
const _ = require("lodash");
const fs = require("fs");
const CTR = require("../../utils/ctr").CTR;
const { CsiBaseDriver } = require("../index");
const { GrpcError, grpc } = require("../../utils/grpc");
const { Filesystem } = require("../../utils/filesystem");
const { Mount } = require("../../utils/mount");
const semver = require("semver");
const { parseAll } = require("@codefresh-io/docker-reference");

const __REGISTRY_NS__ = "EphemeralInlineContainerDOciDriver";

/**
 * https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/20190122-csi-inline-volumes.md
 * https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html
 *
 * Sample calls:
 *  - https://gcsweb.k8s.io/gcs/kubernetes-jenkins/pr-logs/pull/92387/pull-kubernetes-e2e-gce/1280784994997899264/artifacts/_sig-storage_CSI_Volumes/_Driver_csi-hostpath_/_Testpattern_inline_ephemeral_CSI_volume_ephemeral/should_create_read_write_inline_ephemeral_volume/
 *  - https://storage.googleapis.com/kubernetes-jenkins/pr-logs/pull/92387/pull-kubernetes-e2e-gce/1280784994997899264/artifacts/_sig-storage_CSI_Volumes/_Driver_csi-hostpath_/_Testpattern_inline_ephemeral_CSI_volume_ephemeral/should_create_read-only_inline_ephemeral_volume/csi-hostpathplugin-0-hostpath.log
 *
 * inline drivers are assumed to be mount only (no block support)
 * purposely there is no native support for size constraints
 *
 */
class EphemeralInlineContainerDOciDriver extends CsiBaseDriver {
  constructor(ctx, options) {
    super(...arguments);

    options = options || {};
    options.service = options.service || {};
    options.service.identity = options.service.identity || {};
    options.service.controller = options.service.controller || {};
    options.service.node = options.service.node || {};

    options.service.identity.capabilities =
      options.service.identity.capabilities || {};

    options.service.controller.capabilities =
      options.service.controller.capabilities || {};

    options.service.node.capabilities = options.service.node.capabilities || {};

    if (!("service" in options.service.identity.capabilities)) {
      this.ctx.logger.debug("setting default identity service caps");

      options.service.identity.capabilities.service = [
        "UNKNOWN",
        //"CONTROLLER_SERVICE"
        //"VOLUME_ACCESSIBILITY_CONSTRAINTS"
      ];
    }

    if (!("volume_expansion" in options.service.identity.capabilities)) {
      this.ctx.logger.debug("setting default identity volume_expansion caps");

      options.service.identity.capabilities.volume_expansion = [
        "UNKNOWN",
        //"ONLINE",
        //"OFFLINE"
      ];
    }

    if (!("rpc" in options.service.controller.capabilities)) {
      this.ctx.logger.debug("setting default controller caps");

      options.service.controller.capabilities.rpc = [
        //"UNKNOWN",
        //"CREATE_DELETE_VOLUME",
        //"PUBLISH_UNPUBLISH_VOLUME",
        //"LIST_VOLUMES",
        //"GET_CAPACITY",
        //"CREATE_DELETE_SNAPSHOT",
        //"LIST_SNAPSHOTS",
        //"CLONE_VOLUME",
        //"PUBLISH_READONLY",
        //"EXPAND_VOLUME"
      ];

      if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
        options.service.controller.capabilities.rpc
          .push
          //"VOLUME_CONDITION",
          //"GET_VOLUME"
          ();
      }

      if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
        options.service.controller.capabilities.rpc
          .push
          //"SINGLE_NODE_MULTI_WRITER"
          ();
      }
    }

    if (!("rpc" in options.service.node.capabilities)) {
      this.ctx.logger.debug("setting default node caps");
      options.service.node.capabilities.rpc = [
        //"UNKNOWN",
        //"STAGE_UNSTAGE_VOLUME",
        "GET_VOLUME_STATS",
        //"EXPAND_VOLUME",
      ];

      if (semver.satisfies(this.ctx.csiVersion, ">=1.3.0")) {
        //options.service.node.capabilities.rpc.push("VOLUME_CONDITION");
      }

      if (semver.satisfies(this.ctx.csiVersion, ">=1.5.0")) {
        options.service.node.capabilities.rpc.push("SINGLE_NODE_MULTI_WRITER");
        /**
         * This is for volumes that support a mount time gid such as smb or fat
         */
        //options.service.node.capabilities.rpc.push("VOLUME_MOUNT_GROUP");
      }
    }
  }

  /**
   *
   * @returns CTR
   */
  getCTR() {
    return this.ctx.registry.get(`${__REGISTRY_NS__}:ctr`, () => {
      const driver = this;
      let options = _.get(driver.options, "containerd", {});
      options = options || {};
      return new CTR(options);
    });
  }

  assertCapabilities(capabilities) {
    this.ctx.logger.verbose("validating capabilities: %j", capabilities);

    let message = null;
    //[{"access_mode":{"mode":"SINGLE_NODE_WRITER"},"mount":{"mount_flags":["noatime","_netdev"],"fs_type":"nfs"},"access_type":"mount"}]
    const valid = capabilities.every((capability) => {
      if (capability.access_type != "mount") {
        message = `invalid access_type ${capability.access_type}`;
        return false;
      }

      if (capability.mount.fs_type) {
        message = `invalid fs_type ${capability.mount.fs_type}`;
        return false;
      }

      if (
        capability.mount.mount_flags &&
        capability.mount.mount_flags.length > 0
      ) {
        message = `invalid mount_flags ${capability.mount.mount_flags}`;
        return false;
      }

      if (
        ![
          "UNKNOWN",
          "SINGLE_NODE_WRITER",
          "SINGLE_NODE_SINGLE_WRITER", // added in v1.5.0
          "SINGLE_NODE_MULTI_WRITER", // added in v1.5.0
          "SINGLE_NODE_READER_ONLY",
        ].includes(capability.access_mode.mode)
      ) {
        message = `invalid access_mode, ${capability.access_mode.mode}`;
        return false;
      }

      return true;
    });

    return { valid, message };
  }

  /**
   * This should create a dataset with appropriate volume properties, ensuring
   * the mountpoint is the target_path
   *
   * Any volume_context attributes starting with property.<name> will be set as zfs properties
   *
   * {
      "target_path": "/var/lib/kubelet/pods/f8b237db-19e8-44ae-b1d2-740c9aeea702/volumes/kubernetes.io~csi/my-volume-0/mount",
      "volume_capability": {
        "AccessType": {
          "Mount": {}
        },
        "access_mode": {
          "mode": 1
        }
      },
      "volume_context": {
        "csi.storage.k8s.io/ephemeral": "true",
        "csi.storage.k8s.io/pod.name": "inline-volume-tester-2ptb7",
        "csi.storage.k8s.io/pod.namespace": "ephemeral-468",
        "csi.storage.k8s.io/pod.uid": "f8b237db-19e8-44ae-b1d2-740c9aeea702",
        "csi.storage.k8s.io/serviceAccount.name": "default",
        "foo": "bar"
      },
      "volume_id": "csi-8228252978a824126924de00126e6aec7c989a48a39d577bd3ab718647df5555"
    }
   *
   * @param {*} call
   */
  async NodePublishVolume(call) {
    const driver = this;
    const ctr = driver.getCTR();
    const filesystem = new Filesystem();
    const mount = new Mount();

    const volume_id = call.request.volume_id;
    const staging_target_path = call.request.staging_target_path || "";
    const target_path = call.request.target_path;
    const capability = call.request.volume_capability;
    const access_type = capability.access_type || "mount";
    const readonly = call.request.readonly;
    const volume_context = call.request.volume_context;

    let result;

    let imageReference;
    let imagePullPolicy;
    let imagePlatform;
    let imageUser;
    let labels = {};
    Object.keys(volume_context).forEach(function (key) {
      switch (key) {
        case "image.reference":
          imageReference = volume_context[key];
          break;
        case "image.pullPolicy":
          imagePullPolicy = volume_context[key];
          break;
        case "image.platform":
          imagePlatform = volume_context[key];
          break;
        case "image.user":
          imageUser = volume_context[key];
          break;
      }

      if (key.startsWith("snapshot.label.")) {
        labels[key.replace(/^snapshot\.label\./, "")] = volume_context[key];
      }
    });

    if (!imageReference) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `image.reference is required`
      );
    }

    if (!volume_id) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `volume_id is required`
      );
    }

    if (!target_path) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `target_path is required`
      );
    }

    if (capability) {
      const result = driver.assertCapabilities([capability]);

      if (result.valid !== true) {
        throw new GrpcError(grpc.status.INVALID_ARGUMENT, result.message);
      }
    }

    // create publish directory
    if (!fs.existsSync(target_path)) {
      await fs.mkdirSync(target_path, { recursive: true });
    }

    if (process.platform != "win32") {
      result = await mount.pathIsMounted(target_path);
      if (result) {
        return {};
      }
    }

    // normalize image reference
    let parsedImageReference = parseAll(imageReference);
    //console.log(parsedImageReference);

    /**
     *  const typesTemplates = {
          'digest': ref => `${ref.digest}`,
          'canonical': ref => `${ref.repositoryUrl}@${ref.digest}`,
          'repository': ref => `${ref.repositoryUrl}`,
          'tagged': ref => `${ref.repositoryUrl}:${ref.tag}`,
          'dual': ref => `${ref.repositoryUrl}:${ref.tag}@${ref.digest}`
        };
     *
     */
    switch (parsedImageReference.type) {
      // repository is not enough for `ctr`
      case "repository":
        imageReference = `${imageReference}:latest`;
        parsedImageReference = parseAll(imageReference);
        break;

      case "canonical":
      case "digest":
      case "dual":
      case "tagged":
        break;
    }

    driver.ctx.logger.debug(
      `imageReference: ${JSON.stringify(parsedImageReference)}`
    );

    imageReference = parsedImageReference.toString();

    // normalize image pull policy
    if (!imagePullPolicy) {
      imagePullPolicy =
        parsedImageReference.type == "tagged" &&
        parsedImageReference.tag == "latest"
          ? "Always"
          : "IfNotPresent";
    }

    driver.ctx.logger.debug(`effective imagePullPolicy: ${imagePullPolicy}`);

    let doPull = true;
    switch (String(imagePullPolicy).toLowerCase()) {
      case "never":
        doPull = false;
        break;
      case "always":
        doPull = true;
        break;
      case "ifnotpresent":
        try {
          await ctr.imageInspect(imageReference);
          doPull = false;
        } catch (err) {}
        break;
    }

    if (doPull) {
      let ctr_pull_args = [];
      if (imagePlatform) {
        ctr_pull_args.push("--platform", imagePlatform);
      }

      if (imageUser) {
        // TODO: decrypt as appropriate
        // --user value, -u value           User[:password] Registry user and password
        ctr_pull_args.push("--user", imageUser);
      }

      await ctr.imagePull(imageReference, ctr_pull_args);
    }

    let ctr_mount_args = [];
    if (imagePlatform) {
      ctr_mount_args.push("--platform", imagePlatform);
    }

    if (Object.keys(labels).length > 0) {
      for (const label in labels) {
        ctr_mount_args.push("--label", `${label}=${labels[label]}`);
      }
    }

    // kubelet will manage readonly for us by bind mounting and ro, it is expected that the driver mounts rw
    // if (!readonly) {
    //   ctr_mount_args.push("--rw");
    // }
    ctr_mount_args.push("--rw");

    await ctr.imageMount(imageReference, target_path, ctr_mount_args);

    return {};
  }

  /**
   * This should destroy the dataset and remove target_path as appropriate
   *
   *{
      "target_path": "/var/lib/kubelet/pods/f8b237db-19e8-44ae-b1d2-740c9aeea702/volumes/kubernetes.io~csi/my-volume-0/mount",
      "volume_id": "csi-8228252978a824126924de00126e6aec7c989a48a39d577bd3ab718647df5555"
    }
   *
   * @param {*} call
   */
  async NodeUnpublishVolume(call) {
    const driver = this;
    const ctr = driver.getCTR();

    const volume_id = call.request.volume_id;
    const target_path = call.request.target_path;

    if (!volume_id) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `volume_id is required`
      );
    }

    if (!target_path) {
      throw new GrpcError(
        grpc.status.INVALID_ARGUMENT,
        `target_path is required`
      );
    }

    // unmount
    await ctr.imageUnmount(target_path);

    // delete snapshot
    try {
      await ctr.snapshotDelete(target_path);
    } catch (err) {
      if (!err.stderr.includes("does not exist")) {
        throw err;
      }
    }

    // cleanup publish directory
    if (fs.existsSync(target_path) && fs.lstatSync(target_path).isDirectory()) {
      fs.rmSync(target_path, { recursive: true });
    }

    return {};
  }

  /**
   * TODO: consider volume_capabilities?
   *
   * @param {*} call
   */
  async GetCapacity(call) {
    const driver = this;
    const zb = this.getZetabyte();

    let datasetParentName = this.getVolumeParentDatasetName();

    if (!datasetParentName) {
      throw new GrpcError(
        grpc.status.FAILED_PRECONDITION,
        `invalid configuration: missing datasetParentName`
      );
    }

    if (call.request.volume_capabilities) {
      const result = this.assertCapabilities(call.request.volume_capabilities);

      if (result.valid !== true) {
        return { available_capacity: 0 };
      }
    }

    const datasetName = datasetParentName;

    let properties;
    properties = await zb.zfs.get(datasetName, ["avail"]);
    properties = properties[datasetName];

    return { available_capacity: properties.available.value };
  }

  /**
   *
   * @param {*} call
   */
  async ValidateVolumeCapabilities(call) {
    const driver = this;
    const result = this.assertCapabilities(call.request.volume_capabilities);

    if (result.valid !== true) {
      return { message: result.message };
    }

    return {
      confirmed: {
        volume_context: call.request.volume_context,
        volume_capabilities: call.request.volume_capabilities, // TODO: this is a bit crude, should return *ALL* capabilities, not just what was requested
        parameters: call.request.parameters,
      },
    };
  }
}

module.exports.EphemeralInlineContainerDOciDriver =
  EphemeralInlineContainerDOciDriver;
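For reference, a minimal sketch (not part of this commit) of a NodePublishVolume request of roughly the shape this driver handles; the volume id, target path, image reference, and label are illustrative values.

// Illustrative payload; field names follow the CSI request shape used above.
const call = {
  request: {
    volume_id: "csi-0123456789abcdef",
    target_path:
      "/var/lib/kubelet/pods/<pod-uid>/volumes/kubernetes.io~csi/my-volume-0/mount",
    volume_capability: {
      access_type: "mount",
      mount: {},
      access_mode: { mode: "SINGLE_NODE_WRITER" },
    },
    volume_context: {
      "csi.storage.k8s.io/ephemeral": "true",
      "image.reference": "docker.io/library/busybox:1.36",
      "image.pullPolicy": "IfNotPresent",
      "snapshot.label.foo": "bar",
    },
  },
};
// await driver.NodePublishVolume(call); // pulls the image if needed, then `ctr images mount`s it at target_path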
@@ -14,6 +14,9 @@ const { ControllerSmbClientDriver } = require("./controller-smb-client");
 const { ControllerLustreClientDriver } = require("./controller-lustre-client");
 const { ControllerObjectiveFSDriver } = require("./controller-objectivefs");
 const { ControllerSynologyDriver } = require("./controller-synology");
+const {
+  EphemeralInlineContainerDOciDriver,
+} = require("./ephemeral-inline-containerd-oci");
 const { NodeManualDriver } = require("./node-manual");
 
 function factory(ctx, options) {
@@ -53,6 +56,8 @@ function factory(ctx, options) {
       return new ControllerLustreClientDriver(ctx, options);
     case "objectivefs":
       return new ControllerObjectiveFSDriver(ctx, options);
+    case "containerd-oci-ephemeral-inline":
+      return new EphemeralInlineContainerDOciDriver(ctx, options);
     case "node-manual":
       return new NodeManualDriver(ctx, options);
     default:
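A short wiring sketch, assuming the factory dispatches on `options.driver` as the surrounding cases suggest; the require path and the namespace value are hypothetical.

// Hypothetical require path; `ctx` is the runtime context the caller already has.
const { factory } = require("./src/driver/factory");

const driver = factory(ctx, {
  driver: "containerd-oci-ephemeral-inline",
  containerd: { namespace: "k8s.io" }, // assumed namespace
});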
@@ -0,0 +1,176 @@
const cp = require("child_process");

class CTR {
  constructor(options = {}) {
    const ctr = this;
    ctr.options = options;

    options.containerd = options.containerd || {};
    if (process.platform != "win32" && options.containerd.address) {
      //options.containerd.address = "/run/containerd/containerd.sock";
      //options.containerd.address;
    }

    if (process.platform == "win32" && options.containerd.windowsAddress) {
      // --address value, -a value    Address for containerd's GRPC server (default: "\\\\.\\pipe\\containerd-containerd") [%CONTAINERD_ADDRESS%]
      options.containerd.address = options.containerd.windowsAddress;
    }

    if (!options.containerd.namespace) {
      //options.containerd.namespace = "default";
    }

    options.paths = options.paths || {};
    if (!options.paths.ctr) {
      options.paths.ctr = "ctr";
    }

    if (!options.paths.sudo) {
      options.paths.sudo = "/usr/bin/sudo";
    }

    if (!options.executor) {
      options.executor = {
        spawn: cp.spawn,
      };
    }

    if (!options.env) {
      options.env = {};
    }

    if (ctr.options.logger) {
      ctr.logger = ctr.options.logger;
    } else {
      ctr.logger = console;
      console.verbose = function () {
        console.log(...arguments);
      };
    }
  }

  async info() {
    const ctr = this;
    let args = ["info"];
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result.parsed;
  }

  // ctr images pull "${IMAGE}"
  async imagePull(image, args = []) {
    const ctr = this;
    args.unshift("images", "pull");
    args.push(image);
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result.parsed;
  }

  // ctr images mount --rw "${IMAGE}" "${MOUNT_TARGET}"
  async imageMount(image, target, args = []) {
    const ctr = this;
    args.unshift("images", "mount");
    args.push(image, target);
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result;
  }

  // ctr images unmount "${MOUNT_TARGET}"
  async imageUnmount(target, args = []) {
    const ctr = this;
    args.unshift("images", "unmount");
    args.push(target);
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result;
  }

  // ctr image inspect docker.io/library/ubuntu:latest
  async imageInspect(image, args = []) {
    const ctr = this;
    args.unshift("images", "inspect");
    args.push(image);
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result;
  }

  async snapshotList(args = []) {
    const ctr = this;
    args.unshift("snapshot", "list");
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result;
  }

  // ctr snapshots delete [command options] <key> [<key>, ...]
  async snapshotDelete(key) {
    const ctr = this;
    let args = ["snapshot", "delete"];
    args.push(key);
    let result = await ctr.exec(ctr.options.paths.ctr, args);
    return result;
  }

  exec(command, args, options = {}) {
    // if (!options.hasOwnProperty("timeout")) {
    //   options.timeout = DEFAULT_TIMEOUT;
    // }

    const ctr = this;
    args = args || [];

    // --debug

    if (process.platform != "win32" && ctr.options.sudo) {
      args.unshift(command);
      command = ctr.options.paths.sudo;
    }

    options.env = { ...{}, ...ctr.options.env, ...options.env };

    if (ctr.options.containerd.address) {
      options.env.CONTAINERD_ADDRESS = ctr.options.containerd.address;
    }

    if (ctr.options.containerd.namespace) {
      options.env.CONTAINERD_NAMESPACE = ctr.options.containerd.namespace;
    }

    options.env.PATH = process.env.PATH;

    ctr.logger.verbose("executing ctr command: %s %s", command, args.join(" "));

    return new Promise((resolve, reject) => {
      const child = ctr.options.executor.spawn(command, args, options);

      let stdout = "";
      let stderr = "";

      child.stdout.on("data", function (data) {
        stdout = stdout + data;
      });

      child.stderr.on("data", function (data) {
        stderr = stderr + data;
      });

      child.on("close", function (code) {
        const result = { code, stdout, stderr, timeout: false };
        try {
          result.parsed = JSON.parse(result.stdout);
        } catch (err) {}

        // timeout scenario
        if (code === null) {
          result.timeout = true;
          reject(result);
        }

        if (code) {
          reject(result);
        } else {
          resolve(result);
        }
      });
    });
  }
}

module.exports.CTR = CTR;
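A usage sketch of the wrapper above (not part of this commit), assuming `ctr` is on PATH and containerd is reachable at the given socket; the socket path, namespace, image, and mount target are illustrative.

// Pull an image, mount its filesystem, then unmount it again via the CTR wrapper.
const { CTR } = require("./src/utils/ctr");

async function main() {
  const ctr = new CTR({
    containerd: {
      address: "/run/containerd/containerd.sock", // exported as CONTAINERD_ADDRESS
      namespace: "k8s.io", // exported as CONTAINERD_NAMESPACE
    },
  });

  const image = "docker.io/library/busybox:1.36";
  await ctr.imagePull(image); // ctr images pull <image>
  await ctr.imageMount(image, "/mnt/busybox", ["--rw"]); // ctr images mount --rw <image> /mnt/busybox
  await ctr.imageUnmount("/mnt/busybox"); // ctr images unmount /mnt/busybox
}

main().catch((err) => console.error(err));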